#include "cpu/testers/directedtest/DirectedGenerator.hh"
#include "cpu/testers/directedtest/RubyDirectedTester.hh"
-#include "mem/protocol/InvalidateGeneratorStatus.hh"
+#include "mem/ruby/protocol/InvalidateGeneratorStatus.hh"
#include "params/InvalidateGenerator.hh"
class InvalidateGenerator : public DirectedGenerator
#include "cpu/testers/directedtest/DirectedGenerator.hh"
#include "cpu/testers/directedtest/RubyDirectedTester.hh"
-#include "mem/protocol/SeriesRequestGeneratorStatus.hh"
+#include "mem/ruby/protocol/SeriesRequestGeneratorStatus.hh"
#include "params/SeriesRequestGenerator.hh"
class SeriesRequestGenerator : public DirectedGenerator
#include <iostream>
#include "cpu/testers/rubytest/RubyTester.hh"
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/TesterStatus.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
+#include "mem/ruby/protocol/TesterStatus.hh"
class SubBlock;
// across different directories, so query the network.
out_msg.Destination.add(mapAddressToMachine(address,
MachineType:Directory));
- // See mem/protocol/RubySlicc_Exports.sm for possible sizes.
+ // See mem/ruby/protocol/RubySlicc_Exports.sm for possible sizes.
out_msg.MessageSize := MessageSizeType:Control;
// Set that the requestor is this machine so we get the response.
out_msg.Requestor := machineID;
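// For orientation, a hedged sketch of the complete action this fragment
// comes from, in the style of the Learning-gem5 MSI protocol; the action
// name (sendGetS), port name (request_out), message type (RequestMsg), and
// request type (CoherenceRequestType:GetS) are assumptions, not taken from
// this file:
//
// action(sendGetS, "gS", desc="Send GetS to the directory") {
//     enqueue(request_out, RequestMsg, 1) {
//         out_msg.addr := address;
//         out_msg.Type := CoherenceRequestType:GetS;
//         // The destination may vary by address, so query the network.
//         out_msg.Destination.add(mapAddressToMachine(address,
//                                 MachineType:Directory));
//         out_msg.MessageSize := MessageSizeType:Control;
//         out_msg.Requestor := machineID;
//     }
// }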
Import('*')
-# NOTE: All SLICC setup code found in src/mem/protocol/SConscript
+# NOTE: All SLICC setup code found in src/mem/ruby/protocol/SConscript
# Register this protocol with gem5/SCons
all_protocols.extend([
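# A hedged sketch of the registration pattern such a SConscript follows; the
# protocol name below is an illustrative placeholder, not the (truncated)
# list this file actually passes to all_protocols.extend:
#
#   Import('*')
#   all_protocols.extend([
#       'MY_PROTOCOL',
#   ])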
+++ /dev/null
-/*
- * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
- : Sequencer* sequencer;
- CacheMemory * L1cache;
- int TCC_select_num_bits;
- Cycles issue_latency := 80; // time to send data down to TCC
- Cycles l2_hit_latency := 18;
-
- MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";
- MessageBuffer * responseFromSQC, network="To", virtual_network="3", vnet_type="response";
- MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
-
- MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
- MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";
-
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
- I, AccessPermission:Invalid, desc="Invalid";
- S, AccessPermission:Read_Only, desc="Shared";
-
- I_S, AccessPermission:Busy, desc="Invalid, issued RdBlkS, have not seen response yet";
- S_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for clean WB ack";
- I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from TCCdir for canceled WB";
- }
-
- enumeration(Event, desc="SQC Events") {
- // Core initiated
- Fetch, desc="Fetch";
-
- //TCC initiated
- TCC_AckS, desc="TCC Ack to Core Request";
- TCC_AckWB, desc="TCC Ack for WB";
- TCC_NackWB, desc="TCC Nack for WB";
-
- // Mem sys initiated
- Repl, desc="Replacing block from cache";
-
- // Probe Events
- PrbInvData, desc="probe, return M data";
- PrbInv, desc="probe, no need for data";
- PrbShrData, desc="probe downgrade, return data";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
-
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Shared, desc="Victim hit by shared probe";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
- // Internal functions
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
- return cache_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return SQC_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return SQC_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(SQC_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- // Out Ports
-
- out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);
- out_port(responseNetwork_out, ResponseMsg, responseFromSQC);
- out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
-
- // In Ports
-
- in_port(probeNetwork_in, TDProbeRequestMsg, probeToSQC) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, TDProbeRequestMsg, block_on="addr") {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- assert(in_msg.ReturnData);
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
- if (responseToSQC_in.isReady(clockEdge())) {
- peek(responseToSQC_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == CoherenceResponseType:TDSysResp) {
- if (in_msg.State == CoherenceState:Shared) {
- trigger(Event:TCC_AckS, in_msg.addr, cache_entry, tbe);
- } else {
- error("SQC should not receive TDSysResp other than CoherenceState:Shared");
- }
- } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
- trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:TDSysWBNack) {
- trigger(Event:TCC_NackWB, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
-
- assert(in_msg.Type == RubyRequestType:IFETCH);
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
- trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- }
- }
-
- // Actions
-
- action(ic_invCache, "ic", desc="invalidate cache") {
- if(is_valid(cache_entry)) {
- L1cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(vc_victim, "vc", desc="Victimize E/S Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicClean;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:S) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- }
- }
-
- action(a_allocate, "a", desc="allocate block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1cache.allocate(address, new Entry));
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
- tbe.Dirty := cache_entry.Dirty;
- tbe.Shared := false;
- }
-
- action(d_deallocateTBE, "d", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
- responseToSQC_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(l_loadDone, "l", desc="local load done") {
- assert(is_valid(cache_entry));
- sequencer.readCallback(address, cache_entry.DataBlk,
- false, MachineType:L1Cache);
- APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
- }
-
- action(xl_loadDone, "xl", desc="remote load done") {
- peek(responseToSQC_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- sequencer.readCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
- }
- }
-
- action(w_writeCache, "w", desc="write data to cache") {
- peek(responseToSQC_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(ss_sendStaleNotification, "ss", desc="stale data; nothing to write back") {
- peek(responseToSQC_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:StaleNotif;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(wb_data, "wb", desc="write back data") {
- peek(responseToSQC_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Ntsl := true;
- out_msg.Hit := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false; // only true if sending back data, I think
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry) || is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := getDataBlock(address);
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else {
- out_msg.Dirty := cache_entry.Dirty;
- }
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry) || is_valid(tbe));
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := getDataBlock(address);
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else {
- out_msg.Dirty := cache_entry.Dirty;
- }
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
- assert(is_valid(tbe));
- tbe.Shared := true;
- }
-
- action(uu_sendUnblock, "uu", desc="state changed, unblock") {
- enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
- probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
- mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- // Transitions
-
- // transitions from base
- transition(I, Fetch, I_S) {TagArrayRead, TagArrayWrite} {
- a_allocate;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- // simple hit transitions
- transition(S, Fetch) {TagArrayRead, DataArrayRead} {
- l_loadDone;
- p_popMandatoryQueue;
- }
-
- // recycles from transients
- transition({I_S, S_I, I_C}, {Fetch, Repl}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition(S, Repl, S_I) {TagArrayRead} {
- t_allocateTBE;
- vc_victim;
- ic_invCache;
- }
-
- // TCC event
- transition(I_S, TCC_AckS, S) {DataArrayRead, DataArrayWrite} {
- w_writeCache;
- xl_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(S_I, TCC_NackWB, I){TagArrayWrite} {
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(S_I, TCC_AckWB, I) {TagArrayWrite} {
- wb_data;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(I_C, TCC_AckWB, I){TagArrayWrite} {
- ss_sendStaleNotification;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(I_C, TCC_NackWB, I) {TagArrayWrite} {
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- // Probe transitions
- transition({S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInvData, I_C) {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition({S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition({S}, PrbShrData, S) {DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({I, I_C}, PrbShrData) {TagArrayRead} {
- prm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInv, I_C){
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(I_S, {PrbInv, PrbInvData}) {} {
- pi_sendProbeResponseInv;
- ic_invCache;
- a_allocate; // but make sure there is room for incoming data when it arrives
- pp_popProbeQueue;
- }
-
- transition(I_S, PrbShrData) {} {
- prm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(S_I, PrbInvData, I_C) {TagArrayWrite} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(S_I, PrbInv, I_C) {TagArrayWrite} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(S_I, PrbShrData) {DataArrayRead} {
- pd_sendProbeResponseData;
- sf_setSharedFlip;
- pp_popProbeQueue;
- }
-}
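// Note: the SQC fetch path above runs I --Fetch--> I_S (a_allocate,
// nS_issueRdBlkS), then I_S --TCC_AckS--> S (w_writeCache, xl_loadDone,
// uu_sendUnblock); a replacement runs S --Repl--> S_I (t_allocateTBE,
// vc_victim, ic_invCache) and retires to I on TCC_AckWB or TCC_NackWB.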
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:TCC, "TCC Cache")
- : CacheMemory * L2cache;
- WireBuffer * w_reqToTCCDir;
- WireBuffer * w_respToTCCDir;
- WireBuffer * w_TCCUnblockToTCCDir;
- WireBuffer * w_reqToTCC;
- WireBuffer * w_probeToTCC;
- WireBuffer * w_respToTCC;
- int TCC_select_num_bits;
- Cycles l2_request_latency := 1;
- Cycles l2_response_latency := 20;
-
- // To the general response network
- MessageBuffer * responseFromTCC, network="To", virtual_network="3", vnet_type="response";
-
- // From the general response network
- MessageBuffer * responseToTCC, network="From", virtual_network="3", vnet_type="response";
-
-{
- // EVENTS
- enumeration(Event, desc="TCC Events") {
- // Requests coming from the Cores
- RdBlk, desc="CPU RdBlk event";
- RdBlkM, desc="CPU RdBlkM event";
- RdBlkS, desc="CPU RdBlkS event";
- CtoD, desc="Change to Dirty request";
- WrVicBlk, desc="L1 Victim (dirty)";
- WrVicBlkShared, desc="L1 Victim (dirty), shared";
- ClVicBlk, desc="L1 Victim (clean)";
- ClVicBlkShared, desc="L1 Victim (clean), shared";
-
- CPUData, desc="WB data from CPU";
- CPUDataShared, desc="WB data from CPU, NBReqShared 1";
- StaleWB, desc="Stale WB, No data";
-
- L2_Repl, desc="L2 Replacement";
-
- // Probes
- PrbInvData, desc="Invalidating probe, return dirty data";
- PrbInv, desc="Invalidating probe, no need to return data";
- PrbShrData, desc="Downgrading probe, return data";
-
- // Coming from Memory Controller
- WBAck, desc="ack from memory";
-
- CancelWB, desc="Cancel WB from L2";
- }
-
- // STATES
- state_declaration(State, desc="TCC State", default="TCC_State_I") {
- M, AccessPermission:Read_Write, desc="Modified"; // No other cache has copy, memory stale
- O, AccessPermission:Read_Only, desc="Owned"; // Correct most recent copy, others may exist in S
- E, AccessPermission:Read_Write, desc="Exclusive"; // Correct, most recent, and only copy (and == Memory)
- S, AccessPermission:Read_Only, desc="Shared"; // Correct, most recent. If no one in O, then == Memory
- I, AccessPermission:Invalid, desc="Invalid";
-
- I_M, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
- I_O, AccessPermission:Busy, desc="Invalid, received WrVicBlkShared, sent Ack, waiting for Data";
- I_E, AccessPermission:Busy, desc="Invalid, received ClVicBlk, sent Ack, waiting for Data";
- I_S, AccessPermission:Busy, desc="Invalid, received ClVicBlkShared, sent Ack, waiting for Data";
- S_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
- S_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
- S_E, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to E";
- S_S, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to S";
- E_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
- E_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
- E_E, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to E";
- E_S, AccessPermission:Busy, desc="received ClVicBlkShared, sent Ack, waiting for Data, then go to S";
- O_M, AccessPermission:Busy, desc="...";
- O_O, AccessPermission:Busy, desc="...";
- O_E, AccessPermission:Busy, desc="...";
- M_M, AccessPermission:Busy, desc="...";
- M_O, AccessPermission:Busy, desc="...";
- M_E, AccessPermission:Busy, desc="...";
- M_S, AccessPermission:Busy, desc="...";
- D_I, AccessPermission:Invalid, desc="drop WB data on the floor when received";
- MOD_I, AccessPermission:Busy, desc="drop WB data on the floor, waiting for WBAck from Mem";
- MO_I, AccessPermission:Busy, desc="M or O, received L2_Repl, waiting for WBAck from Mem";
- ES_I, AccessPermission:Busy, desc="E or S, received L2_Repl, waiting for WBAck from Mem";
- I_C, AccessPermission:Invalid, desc="sent cancel, just waiting to receive mem wb ack so nothing gets confused";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
-
-
- // STRUCTURES
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (diff from memory)?";
- DataBlock DataBlk, desc="Data for the block";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, desc="Is the data dirty?";
- bool Shared, desc="Victim hit by shared probe";
- MachineID From, desc="Waiting for writeback from...";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
-
-
- // FUNCTION DEFINITIONS
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- return getCacheEntry(addr).DataBlk;
- }
-
- bool presentOrAvail(Addr addr) {
- return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return TCC_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return TCC_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(TCC_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
-
-
- // OUT PORTS
- out_port(w_requestNetwork_out, CPURequestMsg, w_reqToTCCDir);
- out_port(w_TCCResp_out, ResponseMsg, w_respToTCCDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromTCC);
- out_port(w_unblockNetwork_out, UnblockMsg, w_TCCUnblockToTCCDir);
-
- // IN PORTS
- in_port(TDResponse_in, ResponseMsg, w_respToTCC) {
- if (TDResponse_in.isReady(clockEdge())) {
- peek(TDResponse_in, ResponseMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
- trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
- }
- else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Error on TDResponse Type");
- }
- }
- }
- }
-
- // Response Network
- in_port(responseNetwork_in, ResponseMsg, responseToTCC) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:CPUData) {
- if (in_msg.NbReqShared) {
- trigger(Event:CPUDataShared, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:CPUData, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
- trigger(Event:StaleWB, in_msg.addr, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Error on TDResponse Type");
- }
- }
- }
- }
-
- // probe network
- in_port(probeNetwork_in, TDProbeRequestMsg, w_probeToTCC) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, TDProbeRequestMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- } else {
- error("Don't think I should get any of these");
- }
- }
- }
- }
- }
-
- // Request Network
- in_port(requestNetwork_in, CPURequestMsg, w_reqToTCC) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (presentOrAvail(in_msg.addr)) {
- if (in_msg.Shared) {
- trigger(Event:ClVicBlkShared, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:ClVicBlk, in_msg.addr, cache_entry, tbe);
- }
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.addr);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
- if (presentOrAvail(in_msg.addr)) {
- if (in_msg.Shared) {
- trigger(Event:WrVicBlkShared, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
- }
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.addr);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
- }
- }
- }
-
- // BEGIN ACTIONS
-
- action(i_invL2, "i", desc="invalidate TCC cache block") {
- if (is_valid(cache_entry)) {
- L2cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(rm_sendResponseM, "rm", desc="send Modified response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.State := CoherenceState:Modified;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(rs_sendResponseS, "rs", desc="send Shared response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.State := CoherenceState:Shared;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
-
- action(r_requestToTD, "r", desc="Miss in L2, pass on") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Shared := false; // unneeded for this request
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- if (is_valid(cache_entry)) {
- tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
- tbe.Dirty := cache_entry.Dirty;
- }
- tbe.From := machineID;
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(vc_vicClean, "vc", desc="Victimize Clean L2 data") {
- enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:VicClean;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(vd_vicDirty, "vd", desc="Victimize dirty L2 data") {
- enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:VicDirty;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ph_sendProbeResponseHit, "ph", desc="send probe ack hit, no data") {
- enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Hit := true;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pm_sendProbeResponseMiss, "pm", desc="send probe ack miss, no data") {
- enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := cache_entry.DataBlk;
- //assert(cache_entry.Dirty); not needed in TCC, which can supply clean data
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
- enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := tbe.DataBlk;
- //assert(tbe.Dirty);
- out_msg.Dirty := tbe.Dirty;
- out_msg.Hit := true;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.State := CoherenceState:NA;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(mc_cancelMemWriteback, "mc", desc="send writeback cancel to memory") {
- enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WrCancel;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(a_allocateBlock, "a", desc="allocate TCC block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
- }
-
- action(d_writeData, "d", desc="write data to TCC") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- cache_entry.Dirty := in_msg.Dirty;
- }
- cache_entry.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
- }
- }
-
- action(rd_copyDataFromRequest, "rd", desc="copy request data into TCC") {
- peek(requestNetwork_in, CPURequestMsg) {
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := true;
- }
- }
-
- action(f_setFrom, "f", desc="set who WB is expected to come from") {
- peek(requestNetwork_in, CPURequestMsg) {
- tbe.From := in_msg.Requestor;
- }
- }
-
- action(rf_resetFrom, "rf", desc="reset From") {
- tbe.From := machineID;
- }
-
- action(wb_data, "wb", desc="write back data") {
- enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(wt_writeDataToTBE, "wt", desc="write WB data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- }
- }
-
- action(uo_sendUnblockOwner, "uo", desc="state changed to E, M, or O, unblock") {
- enqueue(w_unblockNetwork_out, UnblockMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- out_msg.currentOwner := true;
- out_msg.valid := true;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(us_sendUnblockSharer, "us", desc="state changed to S, unblock") {
- enqueue(w_unblockNetwork_out, UnblockMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- out_msg.currentOwner := false;
- out_msg.valid := true;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(un_sendUnblockNotValid, "un", desc="state changed to I, unblock") {
- enqueue(w_unblockNetwork_out, UnblockMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- out_msg.currentOwner := false;
- out_msg.valid := false;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
- L2cache.setMRU(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(pn_popTDResponseQueue, "pn", desc="pop TD response queue") {
- TDResponse_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(zz_recycleRequestQueue, "\z", desc="recycle request queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
-
- // END ACTIONS
-
- // BEGIN TRANSITIONS
-
- // transitions from base
-
- transition({I, I_C}, {RdBlk, RdBlkS, RdBlkM, CtoD}){TagArrayRead} {
- // TCCdir already knows that the block is not here. This is to allocate and get the block.
- r_requestToTD;
- p_popRequestQueue;
- }
-
-// check
- transition({M, O}, RdBlk, O){TagArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- // detect 2nd chancing
- p_popRequestQueue;
- }
-
-//check
- transition({E, S}, RdBlk, S){TagArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- // detect 2nd chancing
- p_popRequestQueue;
- }
-
-// check
- transition({M, O}, RdBlkS, O){TagArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- // detect 2nd chance sharing
- p_popRequestQueue;
- }
-
-//check
- transition({E, S}, RdBlkS, S){TagArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- // detect 2nd chance sharing
- p_popRequestQueue;
- }
-
-// check
- transition(M, RdBlkM, I){TagArrayRead, TagArrayWrite} {
- rm_sendResponseM;
- i_invL2;
- p_popRequestQueue;
- }
-
- //check
- transition(E, RdBlkM, I){TagArrayRead, TagArrayWrite} {
- rm_sendResponseM;
- i_invL2;
- p_popRequestQueue;
- }
-
-// check
- transition({I}, WrVicBlk, I_M){TagArrayRead} {
- a_allocateBlock;
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(I_C, {WrVicBlk, WrVicBlkShared, ClVicBlk, ClVicBlkShared}) {
- zz_recycleRequestQueue;
- }
-
-//check
- transition({I}, WrVicBlkShared, I_O){TagArrayRead} {
- a_allocateBlock;
- t_allocateTBE;
- f_setFrom;
-// rd_copyDataFromRequest;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-//check
- transition(S, WrVicBlkShared, S_O){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(S, WrVicBlk, S_S){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(E, WrVicBlk, E_E){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(E, WrVicBlkShared, E_E){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(O, WrVicBlk, O_O){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(O, WrVicBlkShared, O_O){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(M, WrVicBlk, M_M){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(M, WrVicBlkShared, M_O){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-//check
- transition({I}, ClVicBlk, I_E){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- a_allocateBlock;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition({I}, ClVicBlkShared, I_S){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- a_allocateBlock;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-//check
- transition(S, ClVicBlkShared, S_S){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(E, ClVicBlk, E_E){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(E, ClVicBlkShared, E_S){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(O, ClVicBlk, O_O){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- // check. Original L3 had it going from O to O_S. Something can go from O to S only on writeback.
- transition(O, ClVicBlkShared, O_O){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(M, ClVicBlk, M_E){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-// a stale writeback
- transition(M, ClVicBlkShared, M_S){TagArrayRead} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
-
- transition({MO_I}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {
- a_allocateBlock;
- t_allocateTBE;
- f_setFrom;
- r_requestToTD;
- p_popRequestQueue;
- }
-
- transition(MO_I, {WrVicBlkShared, WrVicBlk, ClVicBlk, ClVicBlkShared}, MOD_I) {
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(I_M, CPUData, M){TagArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_M, CPUDataShared, O){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_O, {CPUData, CPUDataShared}, O){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_E, CPUData, E){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_E, CPUDataShared, S){TagArrayWrite, DataArrayWrite} {
- us_sendUnblockSharer;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_S, {CPUData, CPUDataShared}, S){TagArrayWrite, DataArrayWrite} {
- us_sendUnblockSharer;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(S_M, CPUDataShared, O){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(S_O, {CPUData, CPUDataShared}, O){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(S_E, CPUDataShared, S){TagArrayWrite, DataArrayWrite} {
- us_sendUnblockSharer;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(S_S, {CPUData, CPUDataShared}, S){TagArrayWrite, DataArrayWrite} {
- us_sendUnblockSharer;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(O_E, CPUDataShared, O){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(O_O, {CPUData, CPUDataShared}, O){TagArrayWrite, DataArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition({D_I}, {CPUData, CPUDataShared}, I){TagArrayWrite} {
- un_sendUnblockNotValid;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(MOD_I, {CPUData, CPUDataShared}, MO_I) {
- un_sendUnblockNotValid;
- rf_resetFrom;
- pr_popResponseQueue;
- }
-
- transition({O,S,I}, CPUData) {
- pr_popResponseQueue;
- }
-
- transition({M, O}, L2_Repl, MO_I){TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- vd_vicDirty;
- i_invL2;
- }
-
- transition({E, S}, L2_Repl, ES_I){TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- vc_vicClean;
- i_invL2;
- }
-
- transition({I_M, I_O, S_M, S_O, E_M, E_O}, L2_Repl) {
- zz_recycleRequestQueue;
- }
-
- transition({O_M, O_O, O_E, M_M, M_O, M_E, M_S}, L2_Repl) {
- zz_recycleRequestQueue;
- }
-
- transition({I_E, I_S, S_E, S_S, E_E, E_S}, L2_Repl) {
- zz_recycleRequestQueue;
- }
-
- transition({M, O}, PrbInvData, I){TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- i_invL2;
- pp_popProbeQueue;
- }
-
- transition(I, PrbInvData){TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({E, S}, PrbInvData, I){TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- i_invL2;
- pp_popProbeQueue;
- }
-
- transition({M, O, E, S, I}, PrbInv, I){TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- i_invL2; // nothing will happen in I
- pp_popProbeQueue;
- }
-
- transition({M, O}, PrbShrData, O){TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({E, S}, PrbShrData, S){TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition(I, PrbShrData){TagArrayRead} {
- pm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInvData, I_C) {
- pdt_sendProbeResponseDataFromTBE;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbInvData, I_C) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({ES_I,MO_I}, PrbInv, I_C) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({ES_I, MO_I}, PrbShrData) {
- pdt_sendProbeResponseDataFromTBE;
- pp_popProbeQueue;
- }
-
- transition(I_C, {PrbInvData, PrbInv}) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbShrData) {
- pm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(MOD_I, WBAck, D_I) {
- pn_popTDResponseQueue;
- }
-
- transition(MO_I, WBAck, I){TagArrayWrite} {
- dt_deallocateTBE;
- pn_popTDResponseQueue;
- }
-
- // this can only be a spurious CPUData from a shared block.
- transition(MO_I, CPUData) {
- pr_popResponseQueue;
- }
-
- transition(ES_I, WBAck, I){TagArrayWrite} {
- dt_deallocateTBE;
- pn_popTDResponseQueue;
- }
-
- transition(I_C, {WBAck}, I){TagArrayWrite} {
- dt_deallocateTBE;
- pn_popTDResponseQueue;
- }
-
- transition({I_M, I_O, I_E, I_S}, StaleWB, I){TagArrayWrite} {
- un_sendUnblockNotValid;
- dt_deallocateTBE;
- i_invL2;
- pr_popResponseQueue;
- }
-
- transition({S_S, S_O, S_M, S_E}, StaleWB, S){TagArrayWrite} {
- us_sendUnblockSharer;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition({E_M, E_O, E_E, E_S}, StaleWB, E){TagArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition({O_M, O_O, O_E}, StaleWB, O){TagArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition({M_M, M_O, M_E, M_S}, StaleWB, M){TagArrayWrite} {
- uo_sendUnblockOwner;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(D_I, StaleWB, I){TagArrayWrite} {
- un_sendUnblockNotValid;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(MOD_I, StaleWB, MO_I) {
- un_sendUnblockNotValid;
- rf_resetFrom;
- pr_popResponseQueue;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Mithuna Thottethodi
- */
-
-machine(MachineType:TCCdir, "AMD read-for-ownership directory for TCC (aka GPU L2)")
-: CacheMemory * directory;
- // Convention: wire buffers are prefixed with "w_" for clarity
- WireBuffer * w_reqToTCCDir;
- WireBuffer * w_respToTCCDir;
- WireBuffer * w_TCCUnblockToTCCDir;
- WireBuffer * w_reqToTCC;
- WireBuffer * w_probeToTCC;
- WireBuffer * w_respToTCC;
- int TCC_select_num_bits;
- Cycles response_latency := 5;
- Cycles directory_latency := 6;
- Cycles issue_latency := 120;
-
- // From the TCPs or SQCs
- MessageBuffer * requestFromTCP, network="From", virtual_network="1", vnet_type="request";
- MessageBuffer * responseFromTCP, network="From", virtual_network="3", vnet_type="response";
- MessageBuffer * unblockFromTCP, network="From", virtual_network="5", vnet_type="unblock";
-
- // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
- MessageBuffer * probeToCore, network="To", virtual_network="1", vnet_type="request";
- MessageBuffer * responseToCore, network="To", virtual_network="3", vnet_type="response";
-
- // From the NB
- MessageBuffer * probeFromNB, network="From", virtual_network="0", vnet_type="request";
- MessageBuffer * responseFromNB, network="From", virtual_network="2", vnet_type="response";
- // To the NB
- MessageBuffer * requestToNB, network="To", virtual_network="0", vnet_type="request";
- MessageBuffer * responseToNB, network="To", virtual_network="2", vnet_type="response";
- MessageBuffer * unblockToNB, network="To", virtual_network="4", vnet_type="unblock";
-
- MessageBuffer * triggerQueue, random="false";
-{
- // STATES
- state_declaration(State, desc="Directory states", default="TCCdir_State_I") {
- // Base states
- I, AccessPermission:Invalid, desc="Invalid";
- S, AccessPermission:Invalid, desc="Shared";
- E, AccessPermission:Invalid, desc="Exclusive";
- O, AccessPermission:Invalid, desc="Owner";
- M, AccessPermission:Invalid, desc="Modified";
-
- CP_I, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to invalid";
- B_I, AccessPermission:Invalid, desc="Blocked, need not send data after acks are in, going to invalid";
- CP_O, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to owned";
- CP_S, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to shared";
- CP_OM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to O_M";
- CP_SM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to S_M";
- CP_ISM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to I_M";
- CP_IOM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to I_M";
- CP_OSIW, AccessPermission:Invalid, desc="Blocked, must send data after acks+CancelWB are in, going to I_C";
-
-
- // Transient states and busy states used for handling side (TCC-facing) interactions
- BW_S, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
- BW_E, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
- BW_O, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
- BW_M, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
-
- // Transient states and busy states used for handling upward (TCP-facing) interactions
- I_M, AccessPermission:Invalid, desc="Invalid, issued RdBlkM, have not seen response yet";
- I_ES, AccessPermission:Invalid, desc="Invalid, issued RdBlk, have not seen response yet";
- I_S, AccessPermission:Invalid, desc="Invalid, issued RdBlkS, have not seen response yet";
- BBS_S, AccessPermission:Invalid, desc="Blocked, going from S to S";
- BBO_O, AccessPermission:Invalid, desc="Blocked, going from O to O";
- BBM_M, AccessPermission:Invalid, desc="Blocked, going from M to M, waiting for data to forward";
- BBM_O, AccessPermission:Invalid, desc="Blocked, going from M to O, waiting for data to forward";
- BB_M, AccessPermission:Invalid, desc="Blocked, going from M to M, waiting for unblock";
- BB_O, AccessPermission:Invalid, desc="Blocked, going from M to O, waiting for unblock";
- BB_OO, AccessPermission:Invalid, desc="Blocked, going from O to O (adding sharers), waiting for unblock";
- BB_S, AccessPermission:Invalid, desc="Blocked, going to S, waiting for (possible multiple) unblock(s)";
- BBS_M, AccessPermission:Invalid, desc="Blocked, going from S to M";
- BBO_M, AccessPermission:Invalid, desc="Blocked, going from O to M";
- BBS_UM, AccessPermission:Invalid, desc="Blocked, going from S to M via upgrade";
- BBO_UM, AccessPermission:Invalid, desc="Blocked, going from O to M via upgrade";
- S_M, AccessPermission:Invalid, desc="Shared, issued CtoD, have not seen response yet";
- O_M, AccessPermission:Invalid, desc="Owned, issued CtoD, have not seen response yet";
-
- // Blocked states waiting for a core unblock after the NB response has been forwarded
- BBB_S, AccessPermission:Invalid, desc="Blocked, going to S after core unblock";
- BBB_M, AccessPermission:Invalid, desc="Blocked, going to M after core unblock";
- BBB_E, AccessPermission:Invalid, desc="Blocked, going to E after core unblock";
-
- VES_I, AccessPermission:Invalid, desc="TCC replacement, waiting for clean WB ack";
- VM_I, AccessPermission:Invalid, desc="TCC replacement, waiting for dirty WB ack";
- VO_I, AccessPermission:Invalid, desc="TCC replacement, waiting for dirty WB ack";
- VO_S, AccessPermission:Invalid, desc="TCC owner replacement, waiting for dirty WB ack";
-
- ES_I, AccessPermission:Invalid, desc="L1 replacement, waiting for clean WB ack";
- MO_I, AccessPermission:Invalid, desc="L1 replacement, waiting for dirty WB ack";
-
- I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from NB for canceled WB";
- I_W, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from NB; canceled WB raced with directory invalidation";
-
- // Recall States
- BRWD_I, AccessPermission:Invalid, desc="Recalling, waiting for WBAck and Probe Data responses";
- BRW_I, AccessPermission:Read_Write, desc="Recalling, waiting for WBAck";
- BRD_I, AccessPermission:Invalid, desc="Recalling, waiting for Probe Data responses";
-
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
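-
- // RequestType is consumed only by recordRequestType (stat accounting) and
- // checkResourceAvailable (per-transition resource gating) further below.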
-
-
-
- // EVENTS
- enumeration(Event, desc="TCC Directory Events") {
- // Upward facing events (TCCdir behaves toward the TCPs/SQCs and TCC as NBdir behaves toward the TCPs/SQCs and L3)
-
- // Directory Recall
- Recall, desc="directory cache is full";
- // CPU requests
- CPUWrite, desc="Initial req from core, sent to TCC";
- NoCPUWrite, desc="Initial req from core, but non-exclusive clean data; can be discarded";
- CPUWriteCancel, desc="Core cancels a pending writeback";
-
- // Requests from the TCPs
- RdBlk, desc="RdBlk event";
- RdBlkM, desc="RdBlkM event";
- RdBlkS, desc="RdBlkS event";
- CtoD, desc="Change to Dirty request";
-
- // TCC writebacks
- VicDirty, desc="TCC victimizes a dirty block, other sharers remain";
- VicDirtyLast, desc="TCC victimizes the last copy of a dirty block";
- VicClean, desc="TCC victimizes the last clean copy";
- NoVic, desc="Clean victim from TCC, other copies remain; no victimization needed";
- StaleVic, desc="Victim from TCC that is no longer tracked here";
- CancelWB, desc="TCC got invalidating probe, canceled WB";
-
- // Probe Responses from TCP/SQCs
- CPUPrbResp, desc="Probe response from TCP/SQC";
- TCCPrbResp, desc="Probe response from TCC";
-
- ProbeAcksComplete, desc="All acks received";
- ProbeAcksCompleteReissue, desc="All acks received, changing CtoD to reissue";
-
- CoreUnblock, desc="unblock from TCP/SQC";
- LastCoreUnblock, desc="Last unblock from TCP/SQC";
- TCCUnblock, desc="unblock from TCC (current owner)";
- TCCUnblock_Sharer, desc="unblock from TCC (a sharer, not owner)";
- TCCUnblock_NotValid, desc="unblock from TCC (not valid; caused by stale writebacks)";
-
- // Downward facing events
-
- // NB initiated
- NB_AckS, desc="NB Ack to TCC Request";
- NB_AckE, desc="NB Ack to TCC Request";
- NB_AckM, desc="NB Ack to TCC Request";
- NB_AckCtoD, desc="NB Ack to TCC Request";
- NB_AckWB, desc="NB Ack for clean WB";
-
-
- // Incoming Probes from NB
- PrbInvData, desc="Invalidating probe, return dirty data";
- PrbInv, desc="Invalidating probe, no need to return data";
- PrbShrData, desc="Downgrading probe, return data";
- }
-
-
- // TYPES
-
- // Entry for directory
- structure(Entry, desc="Directory cache entry", interface='AbstractCacheEntry') {
- State CacheState, desc="Cache state (Cache of directory entries)";
- DataBlock DataBlk, desc="data for the block";
- NetDest Sharers, desc="Sharers for this block";
- NetDest Owner, desc="Owner of this block";
- NetDest MergedSharers, desc="Read sharers who are merged on a request";
- int WaitingUnblocks, desc="Number of acks we're waiting for";
- }
-
- structure(TBE, desc="Transaction buffer entry") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="DataBlk";
- bool Dirty, desc="Is the data dirty?";
- MachineID Requestor, desc="requestor";
- int NumPendingAcks, desc="num acks expected";
- MachineID OriginalRequestor, desc="Original Requestor";
- MachineID UntransferredOwner, desc = "Untransferred owner for an upgrade transaction";
- bool UntransferredOwnerExists, desc = "1 if Untransferred owner exists for an upgrade transaction";
- bool Cached, desc="data hit in Cache";
- bool Shared, desc="victim hit by shared probe";
- bool Upgrade, desc="An upgrade request in progress";
- bool CtoD, desc="Saved sysack info";
- CoherenceState CohState, desc="Saved sysack info";
- MessageSizeType MessageSize, desc="Saved sysack info";
- MachineID Sender, desc="sender";
- }
-
- structure(TBETable, external = "yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- // ** OBJECTS **
- TBETable TBEs, template="<TCCdir_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
- NetDest TCC_dir_subtree;
- NetDest temp;
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- bool presentOrAvail(Addr addr) {
- return directory.isTagPresent(addr) || directory.cacheAvail(addr);
- }
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", directory.lookup(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
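- // Unreachable: callers must have a TBE for this block; the return
- // below only satisfies SLICC's every-path-returns requirement.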
- assert(false);
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(TCCdir_State_to_permission(state));
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return TCCdir_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return TCCdir_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
-
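- // Sanity-check the sharer/owner invariants each stable state implies.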
- if (state == State:S) {
- assert(cache_entry.Owner.count() == 0);
- }
-
- if (state == State:O) {
- assert(cache_entry.Owner.count() == 1);
- assert(cache_entry.Sharers.isSuperset(cache_entry.Owner) == false);
- }
-
- if (state == State:M) {
- assert(cache_entry.Owner.count() == 1);
- assert(cache_entry.Sharers.count() == 0);
- }
-
- if (state == State:E) {
- assert(cache_entry.Owner.count() == 0);
- assert(cache_entry.Sharers.count() == 1);
- }
- }
- }
-
-
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- directory.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- directory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- directory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- directory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return directory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return directory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return directory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return directory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- // ** OUT_PORTS **
-
- // Three classes of ports
- // Class 1: downward facing network links to NB
- out_port(requestToNB_out, CPURequestMsg, requestToNB);
- out_port(responseToNB_out, ResponseMsg, responseToNB);
- out_port(unblockToNB_out, UnblockMsg, unblockToNB);
-
-
- // Class 2: upward facing ports to GPU cores
- out_port(probeToCore_out, TDProbeRequestMsg, probeToCore);
- out_port(responseToCore_out, ResponseMsg, responseToCore);
-
- // Class 3: sideward facing ports (on "wirebuffer" links) to TCC
- out_port(w_requestTCC_out, CPURequestMsg, w_reqToTCC);
- out_port(w_probeTCC_out, NBProbeRequestMsg, w_probeToTCC);
- out_port(w_respTCC_out, ResponseMsg, w_respToTCC);
-
-
- // local trigger port
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- //
- // request queue going to NB
- //
-
- // ** IN_PORTS **
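-
- // In-ports are ranked: higher-rank queues (trigger, unblocks, responses)
- // are serviced before NB probes (rank 1) and fresh core requests (rank 0),
- // so in-flight transactions can always drain ahead of new work.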
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=8) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- assert(is_valid(tbe));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if ((in_msg.Type == TriggerType:AcksComplete) && (tbe.Upgrade == false)) {
- trigger(Event:ProbeAcksComplete, in_msg.addr, cache_entry, tbe);
- } else if ((in_msg.Type == TriggerType:AcksComplete) && (tbe.Upgrade == true)) {
- trigger(Event:ProbeAcksCompleteReissue, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- // Unblock Networks (TCCdir can receive unblocks from TCC, TCPs)
- // Port on first (of three) wire buffers from TCC
- in_port(w_TCCUnblock_in, UnblockMsg, w_TCCUnblockToTCCDir, rank=7) {
- if (w_TCCUnblock_in.isReady(clockEdge())) {
- peek(w_TCCUnblock_in, UnblockMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.currentOwner) {
- trigger(Event:TCCUnblock, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.valid) {
- trigger(Event:TCCUnblock_Sharer, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:TCCUnblock_NotValid, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- in_port(unblockNetwork_in, UnblockMsg, unblockFromTCP, rank=6) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, UnblockMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (cache_entry.WaitingUnblocks == 1) {
- trigger(Event:LastCoreUnblock, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:CoreUnblock, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
-
- //Responses from TCC, and Cores
- // Port on second (of three) wire buffers from TCC
- in_port(w_TCCResponse_in, ResponseMsg, w_respToTCCDir, rank=5) {
- if (w_TCCResponse_in.isReady(clockEdge())) {
- peek(w_TCCResponse_in, ResponseMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
- trigger(Event:TCCPrbResp, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseFromTCP, rank=4) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
- trigger(Event:CPUPrbResp, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
-
- // Port on third (of three) wire buffers from TCC
- in_port(w_TCCRequest_in, CPURequestMsg, w_reqToTCCDir, rank=3) {
- if(w_TCCRequest_in.isReady(clockEdge())) {
- peek(w_TCCRequest_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:WrCancel) {
- trigger(Event:CancelWB, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
- if (is_valid(cache_entry) && cache_entry.Owner.isElement(in_msg.Requestor)) {
- // if modified, or owner with no other sharers
- if ((cache_entry.CacheState == State:M) || (cache_entry.Sharers.count() == 0)) {
- assert(cache_entry.Owner.count()==1);
- trigger(Event:VicDirtyLast, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:VicDirty, in_msg.addr, cache_entry, tbe);
- }
- } else {
- trigger(Event:StaleVic, in_msg.addr, cache_entry, tbe);
- }
- } else {
- if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (is_valid(cache_entry) && cache_entry.Sharers.isElement(in_msg.Requestor)) {
- if (cache_entry.Sharers.count() == 1) {
- // Last copy, victimize to L3
- trigger(Event:VicClean, in_msg.addr, cache_entry, tbe);
- } else {
- // Not the last copy, so no need to victimize;
- // just remove this sharer from the sharer list.
- assert(cache_entry.Sharers.count() > 1);
- trigger(Event:NoVic, in_msg.addr, cache_entry, tbe);
- }
- } else {
- trigger(Event:StaleVic, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
- }
- }
-
- in_port(responseFromNB_in, ResponseMsg, responseFromNB, rank=2) {
- if (responseFromNB_in.isReady(clockEdge())) {
- peek(responseFromNB_in, ResponseMsg, block_on="addr") {
-
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:NBSysResp) {
- if (in_msg.State == CoherenceState:Modified) {
- if (in_msg.CtoD) {
- trigger(Event:NB_AckCtoD, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.State == CoherenceState:Shared) {
- trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.State == CoherenceState:Exclusive) {
- trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- // Finally handling incoming requests (from TCP) and probes (from NB).
-
- in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB, rank=1) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- DPRINTF(RubySlicc, "machineID: %s\n", machineID);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- assert(in_msg.ReturnData);
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
-
- in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
- if (coreRequestNetwork_in.isReady(clockEdge())) {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (presentOrAvail(in_msg.addr)) {
- if (in_msg.Type == CoherenceRequestType:VicDirty) {
- trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (is_valid(cache_entry) && cache_entry.Owner.isElement(in_msg.Requestor)) {
- trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
- } else if(is_valid(cache_entry) && (cache_entry.Sharers.count() + cache_entry.Owner.count() ) >1) {
- trigger(Event:NoCPUWrite, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WrCancel) {
- trigger(Event:CPUWriteCancel, in_msg.addr, cache_entry, tbe);
- }
- } else {
- // All requests require a directory entry
- Addr victim := directory.cacheProbe(in_msg.addr);
- trigger(Event:Recall, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- }
- }
-
-
-
-
- // Actions
-
- // Downward facing actions
-
- action(c_clearOwner, "c", desc="Clear the owner field") {
- cache_entry.Owner.clear();
- }
-
- action(rS_removeRequesterFromSharers, "rS", desc="Remove unblocker from sharer list") {
- peek(unblockNetwork_in, UnblockMsg) {
- cache_entry.Sharers.remove(in_msg.Sender);
- }
- }
-
- action(rT_removeTCCFromSharers, "rT", desc="Remove TCC from sharer list") {
- peek(w_TCCRequest_in, CPURequestMsg) {
- cache_entry.Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(rO_removeOriginalRequestorFromSharers, "rO", desc="Remove replacing core from sharer list") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- cache_entry.Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(rC_removeCoreFromSharers, "rC", desc="Remove replacing core from sharer list") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- cache_entry.Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(rCo_removeCoreFromOwner, "rCo", desc="Remove replacing core from sharer list") {
- // Note that under some cases this action will try to remove a stale owner
- peek(coreRequestNetwork_in, CPURequestMsg) {
- cache_entry.Owner.remove(in_msg.Requestor);
- }
- }
-
- action(rR_removeResponderFromSharers, "rR", desc="Remove responder from sharer list") {
- peek(responseNetwork_in, ResponseMsg) {
- cache_entry.Sharers.remove(in_msg.Sender);
- }
- }
-
- action(nC_sendNullWBAckToCore, "nC", desc = "send a null WB Ack to release core") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(responseToCore_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBNack;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
-
- action(nT_sendNullWBAckToTCC, "nT", desc = "send a null WB Ack to release TCC") {
- peek(w_TCCRequest_in, CPURequestMsg) {
- enqueue(w_respTCC_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
-
- action(eto_moveExSharerToOwner, "eto", desc="move the current exclusive sharer to owner") {
- assert(cache_entry.Sharers.count() == 1);
- assert(cache_entry.Owner.count() == 0);
- cache_entry.Owner := cache_entry.Sharers;
- cache_entry.Sharers.clear();
- APPEND_TRANSITION_COMMENT(" new owner ");
- APPEND_TRANSITION_COMMENT(cache_entry.Owner);
- }
-
- action(aT_addTCCToSharers, "aT", desc="Add TCC to sharer list") {
- peek(w_TCCUnblock_in, UnblockMsg) {
- cache_entry.Sharers.add(in_msg.Sender);
- }
- }
-
- action(as_addToSharers, "as", desc="Add unblocker to sharer list") {
- peek(unblockNetwork_in, UnblockMsg) {
- cache_entry.Sharers.add(in_msg.Sender);
- }
- }
-
- action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
- cache_entry.Sharers.addNetDest(cache_entry.Owner);
- cache_entry.Owner.clear();
- }
-
- action(cc_clearSharers, "\c", desc="Clear the sharers field") {
- cache_entry.Sharers.clear();
- }
-
- action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
- peek(unblockNetwork_in, UnblockMsg) {
- cache_entry.Owner.clear();
- cache_entry.Owner.add(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" tcp_ub owner ");
- APPEND_TRANSITION_COMMENT(cache_entry.Owner);
- }
- }
-
- action(eT_ownerIsUnblocker, "eT", desc="TCC (unblocker) is now owner") {
- peek(w_TCCUnblock_in, UnblockMsg) {
- cache_entry.Owner.clear();
- cache_entry.Owner.add(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" tcc_ub owner ");
- APPEND_TRANSITION_COMMENT(cache_entry.Owner);
- }
- }
-
- action(ctr_copyTCCResponseToTBE, "ctr", desc="Copy TCC probe response data to TBE") {
- peek(w_TCCResponse_in, ResponseMsg) {
- // Overwrite data if tbe does not hold dirty data. Stop once it is dirty.
- if(tbe.Dirty == false) {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- tbe.Sender := in_msg.Sender;
- }
- DPRINTF(RubySlicc, "%s\n", (tbe.DataBlk));
- }
- }
-
- action(ccr_copyCoreResponseToTBE, "ccr", desc="Copy core probe response data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- // Overwrite data if tbe does not hold dirty data. Stop once it is dirty.
- if(tbe.Dirty == false) {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
-
- if(tbe.Sender == machineID) {
- tbe.Sender := in_msg.Sender;
- }
- }
- DPRINTF(RubySlicc, "%s\n", (tbe.DataBlk));
- }
- }
-
- action(cd_clearDirtyBitTBE, "cd", desc="Clear Dirty bit in TBE") {
- tbe.Dirty := false;
- }
-
- action(n_issueRdBlk, "n-", desc="Issue RdBlk") {
- enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
- enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
- enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkM;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(rU_rememberUpgrade, "rU", desc="Remember that this was an upgrade") {
- tbe.Upgrade := true;
- }
-
- action(ruo_rememberUntransferredOwner, "ruo", desc="Remember the untransferred owner") {
- peek(responseNetwork_in, ResponseMsg) {
- if(in_msg.UntransferredOwner == true) {
- tbe.UntransferredOwner := in_msg.Sender;
- tbe.UntransferredOwnerExists := true;
- }
- DPRINTF(RubySlicc, "%s\n", (in_msg));
- }
- }
-
- action(ruoT_rememberUntransferredOwnerTCC, "ruoT", desc="Remember the untransferred owner") {
- peek(w_TCCResponse_in, ResponseMsg) {
- if(in_msg.UntransferredOwner == true) {
- tbe.UntransferredOwner := in_msg.Sender;
- tbe.UntransferredOwnerExists := true;
- }
- DPRINTF(RubySlicc, "%s\n", (in_msg));
- }
- }
-
- action(vd_victim, "vd", desc="Victimize M/O Data") {
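- // The Shared hint tells the NB whether other valid copies may remain:
- // an O-state victim can leave sharers behind, an M victim cannot.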
- enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicDirty;
- if (cache_entry.CacheState == State:O) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- out_msg.Dirty := true;
- }
- }
-
- action(vc_victim, "vc", desc="Victimize E/S Data") {
- enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicClean;
- if (cache_entry.CacheState == State:S) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- out_msg.Dirty := false;
- }
- }
-
-
- action(sT_sendRequestToTCC, "sT", desc="send request to TCC") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(w_requestTCC_out, CPURequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- }
- APPEND_TRANSITION_COMMENT(" requestor ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
-
- }
- }
-
-
- action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
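- // Probe set: sharers plus owner, minus the TCC; the TCC, if it holds the
- // block, is probed separately over the wire buffer (s2_probeShrL2Data).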
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
-
- temp := cache_entry.Sharers;
- temp.addNetDest(cache_entry.Owner);
- if (temp.isElement(tcc)) {
- temp.remove(tcc);
- }
- if (temp.count() > 0) {
- enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination := temp;
- tbe.NumPendingAcks := temp.count();
- if(cache_entry.CacheState == State:M) {
- assert(tbe.NumPendingAcks == 1);
- }
- DPRINTF(RubySlicc, "%s\n", (out_msg));
- }
- }
- }
-
- action(ls2_probeShrL2Data, "ls2", desc="local probe downgrade L2, return data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
- enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(tcc);
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- DPRINTF(RubySlicc, "%s\n", out_msg);
-
- }
- }
- }
-
- action(s2_probeShrL2Data, "s2", desc="probe shared L2, return data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
- enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(tcc);
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- DPRINTF(RubySlicc, "%s\n", out_msg);
-
- }
- }
- }
-
- action(ldc_probeInvCoreData, "ldc", desc="local probe to inv cores, return data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- peek(coreRequestNetwork_in, CPURequestMsg) {
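- // Invalidate every other holder: the requesting core keeps its copy, and
- // the TCC (if present) is probed via ld2_probeInvL2Data instead.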
- NetDest dest := cache_entry.Sharers;
- dest.addNetDest(cache_entry.Owner);
- if (dest.isElement(tcc)) {
- dest.remove(tcc);
- }
- dest.remove(in_msg.Requestor);
- tbe.NumPendingAcks := dest.count();
- if (dest.count()>0){
- enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
-
- out_msg.Destination.addNetDest(dest);
- if(cache_entry.CacheState == State:M) {
- assert(tbe.NumPendingAcks == 1);
- }
-
- DPRINTF(RubySlicc, "%s\n", (out_msg));
- }
- }
- }
- }
-
- action(ld2_probeInvL2Data, "ld2", desc="local probe inv L2, return data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
- enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(tcc);
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- DPRINTF(RubySlicc, "%s\n", out_msg);
-
- }
- }
- }
-
- action(dc_probeInvCoreData, "dc", desc="probe inv cores + TCC, return data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
-
- out_msg.Destination.addNetDest(cache_entry.Sharers);
- out_msg.Destination.addNetDest(cache_entry.Owner);
- tbe.NumPendingAcks := cache_entry.Sharers.count() + cache_entry.Owner.count();
- if(cache_entry.CacheState == State:M) {
- assert(tbe.NumPendingAcks == 1);
- }
- if (out_msg.Destination.isElement(tcc)) {
- out_msg.Destination.remove(tcc);
- tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
- }
-
- DPRINTF(RubySlicc, "%s\n", (out_msg));
- }
- }
-
- action(d2_probeInvL2Data, "d2", desc="probe inv L2, return data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
- enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(tcc);
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- DPRINTF(RubySlicc, "%s\n", out_msg);
-
- }
- }
- }
-
- action(lpc_probeInvCore, "lpc", desc="local probe inv cores, no data") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
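- // Start from all TCPs/SQCs under this directory, then intersect with the
- // recorded sharers and owner to get the real probe targets.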
- TCC_dir_subtree.broadcast(MachineType:TCP);
- TCC_dir_subtree.broadcast(MachineType:SQC);
-
- temp := cache_entry.Sharers;
- temp := temp.OR(cache_entry.Owner);
- TCC_dir_subtree := TCC_dir_subtree.AND(temp);
- tbe.NumPendingAcks := TCC_dir_subtree.count();
- if(cache_entry.CacheState == State:M) {
- assert(tbe.NumPendingAcks == 1);
- }
- if(TCC_dir_subtree.isElement(in_msg.Requestor)) {
- TCC_dir_subtree.remove(in_msg.Requestor);
- tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
- }
-
- if(TCC_dir_subtree.count() > 0) {
- enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := false;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.localCtoD := true;
-
- out_msg.Destination.addNetDest(TCC_dir_subtree);
-
- DPRINTF(RubySlicc, "%s\n", (out_msg));
- }
- }
- }
- }
-
- action(ipc_probeInvCore, "ipc", desc="probe inv cores, no data") {
- TCC_dir_subtree.broadcast(MachineType:TCP);
- TCC_dir_subtree.broadcast(MachineType:SQC);
-
- temp := cache_entry.Sharers;
- temp := temp.OR(cache_entry.Owner);
- TCC_dir_subtree := TCC_dir_subtree.AND(temp);
- tbe.NumPendingAcks := TCC_dir_subtree.count();
- if(TCC_dir_subtree.count() > 0) {
-
- enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := false;
- out_msg.MessageSize := MessageSizeType:Control;
-
- out_msg.Destination.addNetDest(TCC_dir_subtree);
- if(cache_entry.CacheState == State:M) {
- assert(tbe.NumPendingAcks == 1);
- }
-
- DPRINTF(RubySlicc, "%s\n", (out_msg));
- }
- }
- }
-
- action(i2_probeInvL2, "i2", desc="probe inv L2, no data") {
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
- enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := false;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(tcc);
- DPRINTF(RubySlicc, "%s\n", out_msg);
-
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Ntsl := true;
- out_msg.Hit := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false; // Dirty is only set when data is returned
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
-
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry) || is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := getDataBlock(address);
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- }
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
-
- action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := getDataBlock(address);
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- }
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(mc_cancelWB, "mc", desc="send writeback cancel to NB directory") {
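- // A probe invalidated the block while its writeback was pending; ask the
- // NB directory to drop the WB (the I_C / I_W states wait for its ack).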
- enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WrCancel;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(sCS_sendCollectiveResponseS, "sCS", desc="send shared response to all merged TCP/SQC") {
- enqueue(responseToCore_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := tbe.Sender;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.CtoD := false;
- out_msg.State := CoherenceState:Shared;
- out_msg.Destination.addNetDest(cache_entry.MergedSharers);
- out_msg.Shared := tbe.Shared;
- out_msg.Dirty := tbe.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(sS_sendResponseS, "sS", desc="send shared response to TCP/SQC") {
- enqueue(responseToCore_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := tbe.Sender;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.CtoD := false;
- out_msg.State := CoherenceState:Shared;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.Shared := tbe.Shared;
- out_msg.Dirty := tbe.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(sM_sendResponseM, "sM", desc="send response to TCP/SQC") {
- enqueue(responseToCore_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := tbe.Sender;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.CtoD := false;
- out_msg.State := CoherenceState:Modified;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.Shared := tbe.Shared;
- out_msg.Dirty := tbe.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
-
-
- action(fw2_forwardWBAck, "fw2", desc="forward WBAck to TCC") {
- peek(responseFromNB_in, ResponseMsg) {
- if(tbe.OriginalRequestor != machineID) {
- enqueue(w_respTCC_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Sender := machineID;
- //out_msg.DataBlk := tbe.DataBlk;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
- }
-
- action(sa_saveSysAck, "sa", desc="Save SysAck ") {
- peek(responseFromNB_in, ResponseMsg) {
- tbe.Dirty := in_msg.Dirty;
- if (tbe.Dirty == false) {
- tbe.DataBlk := in_msg.DataBlk;
- }
- // else: the TBE already holds dirty probe data; keep it.
- tbe.CtoD := in_msg.CtoD;
- tbe.CohState := in_msg.State;
- tbe.Shared := in_msg.Shared;
- tbe.MessageSize := in_msg.MessageSize;
- }
- }
-
- action(fsa_forwardSavedAck, "fsa", desc="forward saved SysAck to TCP or SQC") {
- enqueue(responseToCore_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- // The saved data block is forwarded as-is; tbe.Sender (set below)
- // already identifies the original data source.
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.CtoD := tbe.CtoD;
- out_msg.State := tbe.CohState;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.Shared := tbe.Shared;
- out_msg.MessageSize := tbe.MessageSize;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := tbe.Sender;
- }
- }
-
- action(fa_forwardSysAck, "fa", desc="forward SysAck to TCP or SQC") {
- peek(responseFromNB_in, ResponseMsg) {
- enqueue(responseToCore_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- if (tbe.Dirty == false) {
- out_msg.DataBlk := in_msg.DataBlk;
- tbe.Sender := machineID;
- }
- else {
- out_msg.DataBlk := tbe.DataBlk;
- }
- out_msg.CtoD := in_msg.CtoD;
- out_msg.State := in_msg.State;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.Sender := tbe.Sender;
- DPRINTF(RubySlicc, "%s\n", (out_msg.DataBlk));
- }
- }
- }
-
- action(pso_probeSharedDataOwner, "pso", desc="probe shared data at owner") {
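- // The owner is either the TCC (probed over the wire buffer) or a core
- // (probed over the core network); exactly one ack is expected either way.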
- MachineID tcc := mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- if (cache_entry.Owner.isElement(tcc)) {
- enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(tcc);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- else { // i.e., owner is a core
- enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.addNetDest(cache_entry.Owner);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- tbe.NumPendingAcks := 1;
- }
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- coreRequestNetwork_in.dequeue(clockEdge());
- }
-
- action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(pk_popResponseQueue, "pk", desc="Pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="Pop incoming probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(pR_popResponseFromNBQueue, "pR", desc="Pop incoming Response queue From NB") {
- responseFromNB_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(pl_popTCCRequestQueue, "pl", desc="pop TCC request queue") {
- w_TCCRequest_in.dequeue(clockEdge());
- }
-
- action(plr_popTCCResponseQueue, "plr", desc="pop TCC response queue") {
- w_TCCResponse_in.dequeue(clockEdge());
- }
-
- action(plu_popTCCUnblockQueue, "plu", desc="pop TCC unblock queue") {
- w_TCCUnblock_in.dequeue(clockEdge());
- }
-
-
- action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
- peek(unblockNetwork_in, UnblockMsg) {
- cache_entry.Sharers.add(in_msg.Sender);
- cache_entry.MergedSharers.remove(in_msg.Sender);
- assert(cache_entry.WaitingUnblocks >= 0);
- cache_entry.WaitingUnblocks := cache_entry.WaitingUnblocks - 1;
- }
- }
-
- action(q_addOutstandingMergedSharer, "q", desc="Increment outstanding requests") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- cache_entry.MergedSharers.add(in_msg.Requestor);
- cache_entry.WaitingUnblocks := cache_entry.WaitingUnblocks + 1;
- }
- }
-
- action(uu_sendUnblock, "uu", desc="state changed, unblock") {
- enqueue(unblockToNB_out, UnblockMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(yy_recycleTCCRequestQueue, "yy", desc="recycle TCC request queue") {
- w_TCCRequest_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(xz_recycleResponseQueue, "xz", desc="recycle response queue") {
- responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(xx_recycleTCCResponseQueue, "xx", desc="recycle TCC response queue") {
- w_TCCResponse_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(vv_recycleTCCUnblockQueue, "vv", desc="Recycle the TCC unblock queue") {
- w_TCCUnblock_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(xy_recycleUnblockQueue, "xy", desc="Recycle the core unblock queue") {
- // Assumption: the name indicates the core-side unblock queue; the
- // original body duplicated vv_ and recycled the TCC unblock buffer.
- unblockNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(ww_recycleProbeRequest, "ww", desc="Recycle the probe request queue") {
- probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(x_decrementAcks, "x", desc="decrement Acks pending") {
- tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
- }
-
- action(o_checkForAckCompletion, "o", desc="check for ack completion") {
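- // Once the pending-ack count reaches zero, enqueue a local trigger that
- // is consumed as Event:ProbeAcksComplete (or ...Reissue for upgrades).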
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- APPEND_TRANSITION_COMMENT(" tbe acks ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(tp_allocateTBE, "tp", desc="allocate TBE Entry for upward transactions") {
- check_allocate(TBEs);
- peek(probeNetwork_in, NBProbeRequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.Dirty := false;
- tbe.NumPendingAcks := 0;
- tbe.UntransferredOwnerExists := false;
- }
- }
-
- action(tv_allocateTBE, "tv", desc="allocate TBE Entry for TCC transactions") {
- check_allocate(TBEs);
- peek(w_TCCRequest_in, CPURequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := in_msg.DataBlk; // Data only for WBs
- tbe.Dirty := false;
- tbe.OriginalRequestor := in_msg.Requestor;
- tbe.NumPendingAcks := 0;
- tbe.UntransferredOwnerExists := false;
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs); // check whether resources are full
- peek(coreRequestNetwork_in, CPURequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
- tbe.Dirty := false;
- tbe.Upgrade := false;
- tbe.OriginalRequestor := in_msg.Requestor;
- tbe.NumPendingAcks := 0;
- tbe.UntransferredOwnerExists := false;
- tbe.Sender := machineID;
- }
- }
-
- action(tr_allocateTBE, "tr", desc="allocate TBE Entry for recall") {
- check_allocate(TBEs); // check whether resources are full
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
- tbe.Dirty := false;
- tbe.Upgrade := false;
- tbe.OriginalRequestor := machineID; //Recall request, Self initiated
- tbe.NumPendingAcks := 0;
- tbe.UntransferredOwnerExists := false;
- }
-
- action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
-
- action(d_allocateDir, "d", desc="allocate Directory Cache") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(directory.allocate(address, new Entry));
- }
- }
-
- action(dd_deallocateDir, "dd", desc="deallocate Directory Cache") {
- if (is_valid(cache_entry)) {
- directory.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:StaleNotif;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(wb_data, "wb", desc="write back data") {
- enqueue(responseToNB_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
- assert(is_valid(tbe));
- tbe.Shared := true;
- }
-
- action(y_writeDataToTBE, "y", desc="write Probe Data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- if (!tbe.Dirty || in_msg.Dirty) {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- }
- if (in_msg.Hit) {
- tbe.Cached := true;
- }
- }
- }
-
- action(ty_writeTCCDataToTBE, "ty", desc="write TCC Probe Data to TBE") {
- peek(w_TCCResponse_in, ResponseMsg) {
- if (!tbe.Dirty || in_msg.Dirty) {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- }
- if (in_msg.Hit) {
- tbe.Cached := true;
- }
- }
- }
-
-
- action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
- directory.setMRU(address);
- }
-
- // TRANSITIONS
-
- // Handling TCP/SQC requests (similar to how NB dir handles TCC events with some changes to account for stateful directory).
-
-
- // transitions from base
- transition(I, RdBlk, I_ES){TagArrayRead} {
- d_allocateDir;
- t_allocateTBE;
- n_issueRdBlk;
- i_popIncomingRequestQueue;
- }
-
- transition(I, RdBlkS, I_S){TagArrayRead} {
- d_allocateDir;
- t_allocateTBE;
- nS_issueRdBlkS;
- i_popIncomingRequestQueue;
- }
-
-
- transition(I_S, NB_AckS, BBB_S) {
- fa_forwardSysAck;
- pR_popResponseFromNBQueue;
- }
-
- transition(I_ES, NB_AckS, BBB_S) {
- fa_forwardSysAck;
- pR_popResponseFromNBQueue;
- }
-
- transition(I_ES, NB_AckE, BBB_E) {
- fa_forwardSysAck;
- pR_popResponseFromNBQueue;
- }
-
- transition({S_M, O_M}, {NB_AckCtoD,NB_AckM}, BBB_M) {
- fa_forwardSysAck;
- pR_popResponseFromNBQueue;
- }
-
- transition(I_M, NB_AckM, BBB_M) {
- fa_forwardSysAck;
- pR_popResponseFromNBQueue;
- }
-
- transition(BBB_M, CoreUnblock, M){TagArrayWrite} {
- c_clearOwner;
- cc_clearSharers;
- e_ownerIsUnblocker;
- uu_sendUnblock;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition(BBB_S, CoreUnblock, S){TagArrayWrite} {
- as_addToSharers;
- uu_sendUnblock;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition(BBB_E, CoreUnblock, E){TagArrayWrite} {
- as_addToSharers;
- uu_sendUnblock;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
-
- transition(I, RdBlkM, I_M){TagArrayRead} {
- d_allocateDir;
- t_allocateTBE;
- nM_issueRdBlkM;
- i_popIncomingRequestQueue;
- }
-
- //
- transition(S, {RdBlk, RdBlkS}, BBS_S){TagArrayRead} {
- t_allocateTBE;
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- q_addOutstandingMergedSharer;
- i_popIncomingRequestQueue;
- }
- // Merge subsequent read sharers into the single outstanding request
- transition(BBS_S, {RdBlk, RdBlkS}) {
- q_addOutstandingMergedSharer;
- i_popIncomingRequestQueue;
- }
- // Wait for probe acks to be complete
- transition(BBS_S, CPUPrbResp) {
- ccr_copyCoreResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(BBS_S, TCCPrbResp) {
- ctr_copyTCCResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- // The merging window closes with this transition;
- // send responses to all outstanding requestors.
- transition(BBS_S, ProbeAcksComplete, BB_S) {
- sCS_sendCollectiveResponseS;
- pt_popTriggerQueue;
- }
-
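- // BB_S drains unblocks from every merged requestor; the final
- // LastCoreUnblock returns the directory to S.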
- transition(BB_S, CoreUnblock, BB_S) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(BB_S, LastCoreUnblock, S) {
- m_addUnlockerToSharers;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition(O, {RdBlk, RdBlkS}, BBO_O){TagArrayRead} {
- t_allocateTBE;
- pso_probeSharedDataOwner;
- q_addOutstandingMergedSharer;
- i_popIncomingRequestQueue;
- }
- // Merge subsequent read sharers into the single outstanding request
- transition(BBO_O, {RdBlk, RdBlkS}) {
- q_addOutstandingMergedSharer;
- i_popIncomingRequestQueue;
- }
-
- // Wait for probe acks to be complete
- transition(BBO_O, CPUPrbResp) {
- ccr_copyCoreResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(BBO_O, TCCPrbResp) {
- ctr_copyTCCResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- // The merging window closes with this transition;
- // send responses to all outstanding requestors.
- transition(BBO_O, ProbeAcksComplete, BB_OO) {
- sCS_sendCollectiveResponseS;
- pt_popTriggerQueue;
- }
-
- transition(BB_OO, CoreUnblock) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(BB_OO, LastCoreUnblock, O){TagArrayWrite} {
- m_addUnlockerToSharers;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition(S, CPUWrite, BW_S){TagArrayRead} {
- t_allocateTBE;
- rC_removeCoreFromSharers;
- sT_sendRequestToTCC;
- i_popIncomingRequestQueue;
- }
-
- transition(E, CPUWrite, BW_E){TagArrayRead} {
- t_allocateTBE;
- rC_removeCoreFromSharers;
- sT_sendRequestToTCC;
- i_popIncomingRequestQueue;
- }
-
- transition(O, CPUWrite, BW_O){TagArrayRead} {
- t_allocateTBE;
- rCo_removeCoreFromOwner;
- rC_removeCoreFromSharers;
- sT_sendRequestToTCC;
- i_popIncomingRequestQueue;
- }
-
- transition(M, CPUWrite, BW_M){TagArrayRead} {
- t_allocateTBE;
- rCo_removeCoreFromOwner;
- rC_removeCoreFromSharers;
- sT_sendRequestToTCC;
- i_popIncomingRequestQueue;
- }
-
- transition(BW_S, TCCUnblock_Sharer, S){TagArrayWrite} {
- aT_addTCCToSharers;
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_S, TCCUnblock_NotValid, S){TagArrayWrite} {
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_E, TCCUnblock, E){TagArrayWrite} {
- cc_clearSharers;
- aT_addTCCToSharers;
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_E, TCCUnblock_NotValid, E) {
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_M, TCCUnblock, M) {
- c_clearOwner;
- cc_clearSharers;
- eT_ownerIsUnblocker;
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_M, TCCUnblock_NotValid, M) {
- // Note this transition should only be executed if we received a stale wb
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_O, TCCUnblock, O) {
- c_clearOwner;
- eT_ownerIsUnblocker;
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition(BW_O, TCCUnblock_NotValid, O) {
- // Note this transition should only be executed if we received a stale wb
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- // We lost the owner, likely due to an invalidation racing with an 'O' writeback.
- transition(BW_O, TCCUnblock_Sharer, S) {
- c_clearOwner;
- aT_addTCCToSharers;
- dt_deallocateTBE;
- plu_popTCCUnblockQueue;
- }
-
- transition({BW_M, BW_S, BW_E, BW_O}, {PrbInv,PrbInvData,PrbShrData}) {
- ww_recycleProbeRequest;
- }
-
- transition(BRWD_I, {PrbInvData, PrbInv, PrbShrData}) {
- ww_recycleProbeRequest;
- }
-
- // Three-step process: locally invalidate others, issue CtoD, wait for NB_AckCtoD
- transition(S, CtoD, BBS_UM) {TagArrayRead} {
- t_allocateTBE;
- lpc_probeInvCore;
- i2_probeInvL2;
- o_checkForAckCompletion;
- i_popIncomingRequestQueue;
- }
-
- transition(BBS_UM, CPUPrbResp, BBS_UM) {
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(BBS_UM, TCCPrbResp) {
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- transition(BBS_UM, ProbeAcksComplete, S_M) {
- rU_rememberUpgrade;
- nM_issueRdBlkM;
- pt_popTriggerQueue;
- }
-
- // Three-step process: locally invalidate others, issue CtoD, wait for NB_AckCtoD
- transition(O, CtoD, BBO_UM){TagArrayRead} {
- t_allocateTBE;
- lpc_probeInvCore;
- i2_probeInvL2;
- o_checkForAckCompletion;
- i_popIncomingRequestQueue;
- }
-
- transition(BBO_UM, CPUPrbResp, BBO_UM) {
- ruo_rememberUntransferredOwner;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(BBO_UM, TCCPrbResp) {
- ruoT_rememberUntransferredOwnerTCC;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- transition(BBO_UM, ProbeAcksComplete, O_M) {
- rU_rememberUpgrade;
- nM_issueRdBlkM;
- pt_popTriggerQueue;
- }
-
- transition({S,E}, RdBlkM, BBS_M){TagArrayWrite} {
- t_allocateTBE;
- ldc_probeInvCoreData;
- ld2_probeInvL2Data;
- o_checkForAckCompletion;
- i_popIncomingRequestQueue;
- }
-
- transition(BBS_M, CPUPrbResp) {
- ccr_copyCoreResponseToTBE;
- rR_removeResponderFromSharers;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(BBS_M, TCCPrbResp) {
- ctr_copyTCCResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- transition(BBS_M, ProbeAcksComplete, S_M) {
- nM_issueRdBlkM;
- pt_popTriggerQueue;
- }
-
- transition(O, RdBlkM, BBO_M){TagArrayRead} {
- t_allocateTBE;
- ldc_probeInvCoreData;
- ld2_probeInvL2Data;
- o_checkForAckCompletion;
- i_popIncomingRequestQueue;
- }
-
- transition(BBO_M, CPUPrbResp) {
- ccr_copyCoreResponseToTBE;
- rR_removeResponderFromSharers;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(BBO_M, TCCPrbResp) {
- ctr_copyTCCResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- transition(BBO_M, ProbeAcksComplete, O_M) {
- nM_issueRdBlkM;
- pt_popTriggerQueue;
- }
-
- //
- transition(M, RdBlkM, BBM_M){TagArrayRead} {
- t_allocateTBE;
- ldc_probeInvCoreData;
- ld2_probeInvL2Data;
- i_popIncomingRequestQueue;
- }
-
- transition(BBM_M, CPUPrbResp) {
- ccr_copyCoreResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- // TCP recalled block before receiving probe
- transition({BBM_M, BBS_M, BBO_M}, {CPUWrite,NoCPUWrite}) {
- zz_recycleRequest;
- }
-
- transition(BBM_M, TCCPrbResp) {
- ctr_copyTCCResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- transition(BBM_M, ProbeAcksComplete, BB_M) {
- sM_sendResponseM;
- pt_popTriggerQueue;
- }
-
- transition(BB_M, CoreUnblock, M){TagArrayWrite} {
- e_ownerIsUnblocker;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition(M, {RdBlkS, RdBlk}, BBM_O){TagArrayRead} {
- t_allocateTBE;
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- i_popIncomingRequestQueue;
- }
-
- transition(E, {RdBlkS, RdBlk}, BBM_O){TagArrayRead} {
- t_allocateTBE;
- eto_moveExSharerToOwner;
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- i_popIncomingRequestQueue;
- }
-
- transition(BBM_O, CPUPrbResp) {
- ccr_copyCoreResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- transition(BBM_O, TCCPrbResp) {
- ctr_copyTCCResponseToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- transition(BBM_O, ProbeAcksComplete, BB_O) {
- sS_sendResponseS;
- pt_popTriggerQueue;
- }
-
- transition(BB_O, CoreUnblock, O){TagArrayWrite} {
- as_addToSharers;
- dt_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition({BBO_O, BBM_M, BBS_S, BBM_O, BB_M, BB_O, BB_S, BBO_UM, BBS_UM, BBS_M, BBO_M, BB_OO}, {PrbInvData, PrbInv,PrbShrData}) {
- ww_recycleProbeRequest;
- }
-
- transition({BBM_O, BBS_S, CP_S, CP_O, CP_SM, CP_OM, BBO_O}, {CPUWrite,NoCPUWrite}) {
- zz_recycleRequest;
- }
-
- // stale CtoD raced with external invalidation
- transition({I, CP_I, B_I, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, CtoD) {
- i_popIncomingRequestQueue;
- }
-
- // stale CtoD raced with internal RdBlkM
- transition({BBM_M, BBS_M, BBO_M, BBB_M, BBS_UM, BBO_UM}, CtoD) {
- i_popIncomingRequestQueue;
- }
-
- transition({E, M}, CtoD) {
- i_popIncomingRequestQueue;
- }
-
-
- // The TCC directory has sent out (and potentially received acks for) probes.
- // TCP/SQC replacements (now known to be stale) are acked and popped off.
- transition({BBO_UM, BBS_UM}, {CPUWrite,NoCPUWrite}) {
- nC_sendNullWBAckToCore;
- i_popIncomingRequestQueue;
- }
-
- transition(S_M, {NoCPUWrite, CPUWrite}) {
- zz_recycleRequest;
- }
-
- transition(O_M, {NoCPUWrite, CPUWrite}) {
- zz_recycleRequest;
- }
-
-
- transition({BBM_M, BBS_M, BBO_M, BBO_UM, BBS_UM}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- transition({CP_S, CP_O, CP_OM, CP_SM}, {VicDirty, VicClean, VicDirtyLast, CancelWB, NoVic}) {
- yy_recycleTCCRequestQueue;
- }
-
- // However, once TCCdir has sent out PrbShrData, these cannot be ignored.
- transition({BBS_S, BBO_O, BBM_O, S_M, O_M, BBB_M, BBB_S, BBB_E}, {VicDirty, VicClean, VicDirtyLast,CancelWB}) {
- yy_recycleTCCRequestQueue;
- }
-
- transition({BW_S,BW_E,BW_O, BW_M}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
- yy_recycleTCCRequestQueue;
- }
-
- transition({BW_S,BW_E,BW_O, BW_M}, CancelWB) {
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
-
- // Recycle if waiting for unblocks.
- transition({BB_M,BB_O,BB_S,BB_OO}, {VicDirty, VicClean, VicDirtyLast,NoVic,CancelWB}) {
- yy_recycleTCCRequestQueue;
- }
-
- transition({BBS_S, BBO_O}, NoVic) {
- rT_removeTCCFromSharers;
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- // Stale request: pop the message and send a dummy ack.
- transition({I_S, I_ES, I_M}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- transition(M, VicDirtyLast, VM_I){TagArrayRead} {
- tv_allocateTBE;
- vd_victim;
- pl_popTCCRequestQueue;
- }
-
- transition(E, VicDirty, VM_I){TagArrayRead} {
- tv_allocateTBE;
- vd_victim;
- pl_popTCCRequestQueue;
- }
-
- transition(O, VicDirty, VO_S){TagArrayRead} {
- tv_allocateTBE;
- vd_victim;
- pl_popTCCRequestQueue;
- }
-
- transition(O, {VicDirtyLast, VicClean}, VO_I){TagArrayRead} {
- tv_allocateTBE;
- vd_victim;
- pl_popTCCRequestQueue;
- }
-
- transition({E, S}, VicClean, VES_I){TagArrayRead} {
- tv_allocateTBE;
- vc_victim;
- pl_popTCCRequestQueue;
- }
-
- transition({O, S}, NoVic){TagArrayRead} {
- rT_removeTCCFromSharers;
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- transition({O,S}, NoCPUWrite){TagArrayRead} {
- rC_removeCoreFromSharers;
- nC_sendNullWBAckToCore;
- i_popIncomingRequestQueue;
- }
-
- transition({M,E}, NoCPUWrite){TagArrayRead} {
- rC_removeCoreFromSharers;
- nC_sendNullWBAckToCore;
- i_popIncomingRequestQueue;
- }
-
- // This can only happen in a race (TCCdir sent out the probes that caused this cancel in the first place).
- transition({VM_I, VES_I, VO_I}, CancelWB) {
- pl_popTCCRequestQueue;
- }
-
- transition({VM_I, VES_I, VO_I}, NB_AckWB, I){TagArrayWrite} {
- c_clearOwner;
- cc_clearSharers;
- wb_data;
- fw2_forwardWBAck;
- dt_deallocateTBE;
- dd_deallocateDir;
- pR_popResponseFromNBQueue;
- }
-
- transition(VO_S, NB_AckWB, S){TagArrayWrite} {
- c_clearOwner;
- wb_data;
- fw2_forwardWBAck;
- dt_deallocateTBE;
- pR_popResponseFromNBQueue;
- }
-
- transition(I_C, NB_AckWB, I){TagArrayWrite} {
- c_clearOwner;
- cc_clearSharers;
- ss_sendStaleNotification;
- fw2_forwardWBAck;
- dt_deallocateTBE;
- dd_deallocateDir;
- pR_popResponseFromNBQueue;
- }
-
- transition(I_W, NB_AckWB, I) {
- ss_sendStaleNotification;
- dt_deallocateTBE;
- dd_deallocateDir;
- pR_popResponseFromNBQueue;
- }
-
-
-
- // Do not handle replacements, reads of any kind, or writebacks from transient states; recycle.
- transition({I_M, I_ES, I_S, MO_I, ES_I, S_M, O_M, VES_I, VO_I, VO_S, VM_I, I_C, I_W}, {RdBlkS,RdBlkM,RdBlk,CtoD}) {
- zz_recycleRequest;
- }
-
- transition(VO_S, NoCPUWrite) {
- zz_recycleRequest;
- }
-
- transition({BW_M, BW_S, BW_O, BW_E}, {RdBlkS,RdBlkM,RdBlk,CtoD,NoCPUWrite, CPUWrite}) {
- zz_recycleRequest;
- }
-
- transition({BBB_M, BBB_S, BBB_E, BB_O, BB_M, BB_S, BB_OO}, { RdBlk, RdBlkS, RdBlkM, CPUWrite, NoCPUWrite}) {
- zz_recycleRequest;
- }
-
- transition({BBB_S, BBB_E, BB_O, BB_S, BB_OO}, { CtoD}) {
- zz_recycleRequest;
- }
-
- transition({BBS_UM, BBO_UM, BBM_M, BBM_O, BBS_M, BBO_M}, { RdBlk, RdBlkS, RdBlkM}) {
- zz_recycleRequest;
- }
-
- transition(BBM_O, CtoD) {
- zz_recycleRequest;
- }
-
- transition({BBS_S, BBO_O}, {RdBlkM, CtoD}) {
- zz_recycleRequest;
- }
-
- transition({B_I, CP_I, CP_S, CP_O, CP_OM, CP_SM, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, {RdBlk, RdBlkS, RdBlkM}) {
- zz_recycleRequest;
- }
-
- transition({CP_O, CP_S, CP_OM}, CtoD) {
- zz_recycleRequest;
- }
-
- // Ignore replacement-related messages after a probe got in.
- transition({CP_I, B_I, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, {CPUWrite, NoCPUWrite}) {
- zz_recycleRequest;
- }
-
- // Replacement-related messages arriving after probes were processed are stale; send a null WB ack.
- transition({I, I_S, I_ES, I_M, I_C, I_W}, {CPUWrite,NoCPUWrite}) {
- nC_sendNullWBAckToCore;
- i_popIncomingRequestQueue;
- }
- // Cannot ignore a cancel, otherwise the TCP/SQC will be stuck in I_C.
- transition({I, I_S, I_ES, I_M, I_C, I_W, S_M, M, O, E, S}, CPUWriteCancel){TagArrayRead} {
- nC_sendNullWBAckToCore;
- i_popIncomingRequestQueue;
- }
-
- transition({CP_I, B_I, CP_IOM, CP_ISM, BRWD_I, BRW_I, BRD_I}, {NoVic, VicClean, VicDirty, VicDirtyLast}){
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- // Handling probes from the NB. General process: (1) propagate the probe up and go to a blocking state, (2) collect probe acks, (3) on the last ack, respond downward.
-
- // step 1
- transition({M, O, E, S}, PrbInvData, CP_I){TagArrayRead} {
- tp_allocateTBE;
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_I, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_I, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_I, ProbeAcksComplete, I){TagArrayWrite} {
- pd_sendProbeResponseData;
- c_clearOwner;
- cc_clearSharers;
- dt_deallocateTBE;
- dd_deallocateDir;
- pt_popTriggerQueue;
- }
-
- // step 1
- transition({M, O, E, S}, PrbInv, B_I){TagArrayWrite} {
- tp_allocateTBE;
- ipc_probeInvCore;
- i2_probeInvL2;
- pp_popProbeQueue;
- }
- // step 2a
- transition(B_I, CPUPrbResp) {
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(B_I, TCCPrbResp) {
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(B_I, ProbeAcksComplete, I){TagArrayWrite} {
- // send response down to NB
- pi_sendProbeResponseInv;
- c_clearOwner;
- cc_clearSharers;
- dt_deallocateTBE;
- dd_deallocateDir;
- pt_popTriggerQueue;
- }
-
-
- // step 1
- transition({M, O}, PrbShrData, CP_O){TagArrayRead} {
- tp_allocateTBE;
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- pp_popProbeQueue;
- }
-
- transition(E, PrbShrData, CP_O){TagArrayRead} {
- tp_allocateTBE;
- eto_moveExSharerToOwner;
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_O, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_O, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_O, ProbeAcksComplete, O){TagArrayWrite} {
- // send response down to NB
- pd_sendProbeResponseData;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- //step 1
- transition(S, PrbShrData, CP_S) {
- tp_allocateTBE;
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_S, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_S, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_S, ProbeAcksComplete, S) {
- // send response down to NB
- pd_sendProbeResponseData;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- // step 1
- transition(O_M, PrbInvData, CP_IOM) {
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_IOM, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_IOM, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_IOM, ProbeAcksComplete, I_M) {
- pdm_sendProbeResponseDataMs;
- c_clearOwner;
- cc_clearSharers;
- cd_clearDirtyBitTBE;
- pt_popTriggerQueue;
- }
-
- transition(CP_IOM, ProbeAcksCompleteReissue, I){TagArrayWrite} {
- pdm_sendProbeResponseDataMs;
- c_clearOwner;
- cc_clearSharers;
- dt_deallocateTBE;
- dd_deallocateDir;
- pt_popTriggerQueue;
- }
-
- // step 1
- transition(S_M, PrbInvData, CP_ISM) {
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- o_checkForAckCompletion;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_ISM, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_ISM, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_ISM, ProbeAcksComplete, I_M) {
- pdm_sendProbeResponseDataMs;
- c_clearOwner;
- cc_clearSharers;
- cd_clearDirtyBitTBE;
-
- //dt_deallocateTBE;
- pt_popTriggerQueue;
- }
- transition(CP_ISM, ProbeAcksCompleteReissue, I){TagArrayWrite} {
- pim_sendProbeResponseInvMs;
- c_clearOwner;
- cc_clearSharers;
- dt_deallocateTBE;
- dd_deallocateDir;
- pt_popTriggerQueue;
- }
-
- // step 1
- transition({S_M, O_M}, {PrbInv}, CP_ISM) {
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- pp_popProbeQueue;
- }
- // next steps inherited from CP_ISM above
-
- // Simpler cases
-
- transition({I_C, I_W}, {PrbInvData, PrbInv, PrbShrData}) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- // If the directory is certain that the block is not present, it can send an
- // acknowledgement right away; no need for the three-step process.
- transition(I, {PrbInv,PrbShrData,PrbInvData}){TagArrayRead} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({I_M, I_ES, I_S}, {PrbInv, PrbInvData}) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({I_M, I_ES, I_S}, PrbShrData) {
- prm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- //step 1
- transition(S_M, PrbShrData, CP_SM) {
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- o_checkForAckCompletion;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_SM, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_SM, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_SM, {ProbeAcksComplete,ProbeAcksCompleteReissue}, S_M){DataArrayRead} {
- // send response down to NB
- pd_sendProbeResponseData;
- pt_popTriggerQueue;
- }
-
- //step 1
- transition(O_M, PrbShrData, CP_OM) {
- sc_probeShrCoreData;
- s2_probeShrL2Data;
- pp_popProbeQueue;
- }
- // step 2a
- transition(CP_OM, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
- // step 2b
- transition(CP_OM, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- // step 3
- transition(CP_OM, {ProbeAcksComplete,ProbeAcksCompleteReissue}, O_M) {
- // send response down to NB
- pd_sendProbeResponseData;
- pt_popTriggerQueue;
- }
-
- transition(BRW_I, PrbInvData, I_W) {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({VM_I,VO_I}, PrbInvData, I_C) {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition(VES_I, {PrbInvData,PrbInv}, I_C) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({VM_I, VO_I, BRW_I}, PrbInv, I_W) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({VM_I, VO_I, VO_S, VES_I, BRW_I}, PrbShrData) {
- pd_sendProbeResponseData;
- sf_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition(VO_S, PrbInvData, CP_OSIW) {
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- pp_popProbeQueue;
- }
-
- transition(CP_OSIW, TCCPrbResp) {
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
- transition(CP_OSIW, CPUPrbResp) {
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition(CP_OSIW, ProbeAcksComplete, I_C) {
- pd_sendProbeResponseData;
- cd_clearDirtyBitTBE;
- pt_popTriggerQueue;
- }
-
- transition({I, S, E, O, M, CP_O, CP_S, CP_OM, CP_SM, CP_OSIW, BW_S, BW_E, BW_O, BW_M, I_M, I_ES, I_S, BBS_S, BBO_O, BBM_M, BBM_O, BB_M, BB_O, BB_OO, BB_S, BBS_M, BBO_M, BBO_UM, BBS_UM, S_M, O_M, BBB_S, BBB_M, BBB_E, VES_I, VM_I, VO_I, VO_S, ES_I, MO_I, I_C, I_W}, StaleVic) {
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- transition({CP_I, B_I, CP_IOM, CP_ISM, BRWD_I, BRW_I, BRD_I}, StaleVic) {
- nT_sendNullWBAckToTCC;
- pl_popTCCRequestQueue;
- }
-
- // Recall transitions
- // (transient states still require the directory state)
- transition({M, O}, Recall, BRWD_I) {
- tr_allocateTBE;
- vd_victim;
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- }
-
- transition({E, S}, Recall, BRWD_I) {
- tr_allocateTBE;
- vc_victim;
- dc_probeInvCoreData;
- d2_probeInvL2Data;
- }
-
- transition(I, Recall) {
- dd_deallocateDir;
- }
-
- transition({BRWD_I, BRD_I}, CPUPrbResp) {
- y_writeDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- pk_popResponseQueue;
- }
-
- transition({BRWD_I, BRD_I}, TCCPrbResp) {
- ty_writeTCCDataToTBE;
- x_decrementAcks;
- o_checkForAckCompletion;
- plr_popTCCResponseQueue;
- }
-
- transition(BRWD_I, NB_AckWB, BRD_I) {
- pR_popResponseFromNBQueue;
- }
-
- transition(BRWD_I, ProbeAcksComplete, BRW_I) {
- pt_popTriggerQueue;
- }
-
- transition(BRW_I, NB_AckWB, I) {
- wb_data;
- dt_deallocateTBE;
- dd_deallocateDir;
- pR_popResponseFromNBQueue;
- }
-
- transition(BRD_I, ProbeAcksComplete, I) {
- wb_data;
- dt_deallocateTBE;
- dd_deallocateDir;
- pt_popTriggerQueue;
- }
-
- // Wait for a stable state before servicing a Recall.
- transition({BRWD_I,BRD_I,BRW_I,CP_O, CP_S, CP_OM, CP_SM, CP_OSIW, BW_S, BW_E, BW_O, BW_M, I_M, I_ES, I_S, BBS_S, BBO_O, BBM_M, BBM_O, BB_M, BB_O, BB_OO, BB_S, BBS_M, BBO_M, BBO_UM, BBS_UM, S_M, O_M, BBB_S, BBB_M, BBB_E, VES_I, VM_I, VO_I, VO_S, ES_I, MO_I, I_C, I_W, CP_I}, Recall) {
- zz_recycleRequest; // stall-and-wait would block on the wrong address
- ut_updateTag; // set MRU so the replacement policy proposes an easier block to recall
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:TCP, "GPU TCP (L1 Data Cache)")
- : GPUCoalescer* coalescer;
- Sequencer* sequencer;
- bool use_seq_not_coal;
- CacheMemory * L1cache;
- int TCC_select_num_bits;
- Cycles issue_latency := 40; // time to send data down to TCC
- Cycles l2_hit_latency := 18;
-
- MessageBuffer * requestFromTCP, network="To", virtual_network="1", vnet_type="request";
- MessageBuffer * responseFromTCP, network="To", virtual_network="3", vnet_type="response";
- MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
-
- MessageBuffer * probeToTCP, network="From", virtual_network="1", vnet_type="request";
- MessageBuffer * responseToTCP, network="From", virtual_network="3", vnet_type="response";
-
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="TCP Cache States", default="TCP_State_I") {
- I, AccessPermission:Invalid, desc="Invalid";
- S, AccessPermission:Read_Only, desc="Shared";
- E, AccessPermission:Read_Write, desc="Exclusive";
- O, AccessPermission:Read_Only, desc="Owner state in core, both clusters and other cores may be sharing line";
- M, AccessPermission:Read_Write, desc="Modified";
-
- I_M, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
- I_ES, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
- S_M, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- O_M, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
-
- ES_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for clean WB ack";
- MO_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for dirty WB ack";
-
- MO_PI, AccessPermission:Read_Only, desc="L1 downgrade, waiting for CtoD ack (or ProbeInvalidateData)";
-
- I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from TCC for canceled WB";
- }
-
- enumeration(Event, desc="TCP Events") {
- // Core initiated
- Load, desc="Load";
- Store, desc="Store";
-
- // TCC initiated
- TCC_AckS, desc="TCC Ack to Core Request";
- TCC_AckE, desc="TCC Ack to Core Request";
- TCC_AckM, desc="TCC Ack to Core Request";
- TCC_AckCtoD, desc="TCC Ack to Core Request";
- TCC_AckWB, desc="TCC Ack for clean WB";
- TCC_NackWB, desc="TCC Nack for clean WB";
-
- // Mem sys initiated
- Repl, desc="Replacing block from cache";
-
- // Probe Events
- PrbInvData, desc="probe, return O or M data";
- PrbInv, desc="probe, no need for data";
- LocalPrbInv, desc="local probe, no need for data";
- PrbShrData, desc="probe downgrade, return O or M data";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
-
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (diff than memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Shared, desc="Victim hit by shared probe";
- }
-
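- // TBETable is implemented in C++ (external="yes"); only the methods
- // declared here are visible to SLICC.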
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<TCP_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
- // Internal functions
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
- return cache_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return TCP_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return TCP_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- bool isValid(Addr addr) {
- AccessPermission perm := getAccessPermission(addr);
- if (perm == AccessPermission:NotPresent ||
- perm == AccessPermission:Invalid ||
- perm == AccessPermission:Busy) {
- return false;
- } else {
- return true;
- }
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(TCP_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- MachineType getCoherenceType(MachineID myMachID,
- MachineID senderMachID) {
- if(myMachID == senderMachID) {
- return MachineType:TCP;
- } else if(machineIDToMachineType(senderMachID) == MachineType:TCP) {
- return MachineType:L1Cache_wCC;
- } else if(machineIDToMachineType(senderMachID) == MachineType:TCC) {
- return MachineType:TCC;
- } else {
- return MachineType:TCCdir;
- }
- }
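- // Used by the remote load/store completion callbacks below to attribute a
- // response to a machine type: a response from a peer TCP is recorded as
- // L1Cache_wCC, presumably so the stats can separate cache-to-cache
- // transfers from TCC/TCCdir hits.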
-
- // Out Ports
-
- out_port(requestNetwork_out, CPURequestMsg, requestFromTCP);
- out_port(responseNetwork_out, ResponseMsg, responseFromTCP);
- out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
-
- // In Ports
-
- in_port(probeNetwork_in, TDProbeRequestMsg, probeToTCP) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, TDProbeRequestMsg, block_on="addr") {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- DPRINTF(RubySlicc, "machineID: %s\n", machineID);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- if(in_msg.localCtoD) {
- trigger(Event:LocalPrbInv, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- assert(in_msg.ReturnData);
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- in_port(responseToTCP_in, ResponseMsg, responseToTCP) {
- if (responseToTCP_in.isReady(clockEdge())) {
- peek(responseToTCP_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == CoherenceResponseType:TDSysResp) {
- if (in_msg.State == CoherenceState:Modified) {
- if (in_msg.CtoD) {
- trigger(Event:TCC_AckCtoD, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:TCC_AckM, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.State == CoherenceState:Shared) {
- trigger(Event:TCC_AckS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.State == CoherenceState:Exclusive) {
- trigger(Event:TCC_AckE, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
- trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:TDSysWBNack) {
- trigger(Event:TCC_NackWB, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
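- // Core requests: if the line is present or a way is free, trigger Load or
- // Store directly; otherwise trigger Repl on the victim returned by
- // cacheProbe, and the original request is retried once the victim drains.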
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
- DPRINTF(RubySlicc, "%s\n", in_msg);
- if (in_msg.Type == RubyRequestType:LD) {
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
- trigger(Event:Load, in_msg.LineAddress, cache_entry, tbe);
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else {
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
- trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- }
- }
- }
-
- // Actions
-
- action(ic_invCache, "ic", desc="invalidate cache") {
- if(is_valid(cache_entry)) {
- L1cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(n_issueRdBlk, "n", desc="Issue RdBlk") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkM;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(vd_victim, "vd", desc="Victimize M/O Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- assert(is_valid(cache_entry));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicDirty;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:O) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- out_msg.Dirty := cache_entry.Dirty;
- }
- }
-
- action(vc_victim, "vc", desc="Victimize E/S Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicClean;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:S) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- }
- }
-
- action(a_allocate, "a", desc="allocate block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1cache.allocate(address, new Entry));
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
- tbe.Dirty := cache_entry.Dirty;
- tbe.Shared := false;
- }
-
- action(d_deallocateTBE, "d", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
- responseToTCP_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
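- // Completion callbacks are routed to either the Sequencer or the
- // GPUCoalescer, selected by the use_seq_not_coal parameter.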
- action(l_loadDone, "l", desc="local load done") {
- assert(is_valid(cache_entry));
- if (use_seq_not_coal) {
- sequencer.readCallback(address, cache_entry.DataBlk,
- false, MachineType:TCP);
- } else {
- coalescer.readCallback(address, MachineType:TCP, cache_entry.DataBlk);
- }
- }
-
- action(xl_loadDone, "xl", desc="remote load done") {
- peek(responseToTCP_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- if (use_seq_not_coal) {
- coalescer.recordCPReadCallBack(machineID, in_msg.Sender);
- sequencer.readCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- } else {
- MachineType cc_mach_type := getCoherenceType(machineID,
- in_msg.Sender);
- coalescer.readCallback(address,
- cc_mach_type,
- cache_entry.DataBlk,
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
- }
-
- action(s_storeDone, "s", desc="local store done") {
- assert(is_valid(cache_entry));
- if (use_seq_not_coal) {
- coalescer.recordCPWriteCallBack(machineID, machineID);
- sequencer.writeCallback(address, cache_entry.DataBlk,
- false, MachineType:TCP);
- } else {
- coalescer.writeCallback(address, MachineType:TCP, cache_entry.DataBlk);
- }
- cache_entry.Dirty := true;
- }
-
- action(xs_storeDone, "xs", desc="remote store done") {
- peek(responseToTCP_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- if (use_seq_not_coal) {
- coalescer.recordCPWriteCallBack(machineID, in_msg.Sender);
- sequencer.writeCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- } else {
- MachineType cc_mach_type := getCoherenceType(machineID,
- in_msg.Sender);
- coalescer.writeCallback(address,
- cc_mach_type,
- cache_entry.DataBlk,
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- cache_entry.Dirty := true;
- }
- }
-
- action(w_writeCache, "w", desc="write data to cache") {
- peek(responseToTCP_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
- peek(responseToTCP_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:StaleNotif;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(wb_data, "wb", desc="write back data") {
- peek(responseToTCP_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(piu_sendProbeResponseInvUntransferredOwnership, "piu", desc="send probe ack inv, no data, retain ownership") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.UntransferredOwner :=true;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false;
- out_msg.Ntsl := true;
- out_msg.Hit := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.Dirty := false; // only true if sending back data, I think
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry) || is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := getDataBlock(address);
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else {
- out_msg.Dirty := cache_entry.Dirty;
- }
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.isValid := isValid(address);
- APPEND_TRANSITION_COMMENT("Sending ack with dirty ");
- APPEND_TRANSITION_COMMENT(out_msg.Dirty);
- }
- }
-
- action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry) || is_valid(tbe));
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.DataBlk := getDataBlock(address);
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else {
- out_msg.Dirty := cache_entry.Dirty;
- }
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.isValid := isValid(address);
- APPEND_TRANSITION_COMMENT("Sending ack with dirty ");
- APPEND_TRANSITION_COMMENT(out_msg.Dirty);
- DPRINTF(RubySlicc, "Data is %s\n", out_msg.DataBlk);
- }
- }
-
- action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
- assert(is_valid(tbe));
- tbe.Shared := true;
- }
-
- action(mru_updateMRU, "mru", desc="Touch block for replacement policy") {
- L1cache.setMRU(address);
- }
-
- action(uu_sendUnblock, "uu", desc="state changed, unblock") {
- enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- out_msg.wasValid := isValid(address);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
- probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
- mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
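- // Request flow, as a sketch of the transitions below: a miss allocates a
- // line and issues RdBlk/RdBlkM toward the TCCdir; the fill completes on a
- // TCC_Ack* response, after which an unblock is sent.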
- // Transitions
-
- // transitions from base
- transition(I, Load, I_ES) {TagArrayRead} {
- a_allocate;
- n_issueRdBlk;
- p_popMandatoryQueue;
- }
-
- transition(I, Store, I_M) {TagArrayRead, TagArrayWrite} {
- a_allocate;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(S, Store, S_M) {TagArrayRead} {
- mru_updateMRU;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(E, Store, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- mru_updateMRU;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(O, Store, O_M) {TagArrayRead, DataArrayWrite} {
- mru_updateMRU;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(M, Store) {TagArrayRead, DataArrayWrite} {
- mru_updateMRU;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- // simple hit transitions
- transition({S, E, O, M}, Load) {TagArrayRead, DataArrayRead} {
- l_loadDone;
- mru_updateMRU;
- p_popMandatoryQueue;
- }
-
- // recycles from transients
- transition({I_M, I_ES, ES_I, MO_I, S_M, O_M, MO_PI, I_C}, {Load, Store, Repl}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({S, E}, Repl, ES_I) {TagArrayRead} {
- t_allocateTBE;
- vc_victim;
- ic_invCache;
- }
-
- transition({O, M}, Repl, MO_I) {TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- vd_victim;
- ic_invCache;
- }
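- // Replacements invalidate the local copy immediately; the TBE holds the
- // data until the TCC acks (TCC_AckWB) or nacks (TCC_NackWB) the writeback.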
-
- // TD event transitions
- transition(I_M, {TCC_AckM, TCC_AckCtoD}, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- w_writeCache;
- xs_storeDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_ES, TCC_AckS, S) {TagArrayWrite, DataArrayWrite} {
- w_writeCache;
- xl_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_ES, TCC_AckE, E) {TagArrayWrite, DataArrayWrite} {
- w_writeCache;
- xl_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition({S_M, O_M}, TCC_AckM, M) {TagArrayWrite, DataArrayWrite} {
- xs_storeDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition({MO_I, ES_I}, TCC_NackWB, I){TagArrayWrite} {
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition({MO_I, ES_I}, TCC_AckWB, I) {TagArrayWrite, DataArrayRead} {
- wb_data;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(I_C, TCC_AckWB, I) {TagArrayWrite} {
- ss_sendStaleNotification;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(I_C, TCC_NackWB, I) {TagArrayWrite} {
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- // Probe transitions
- transition({M, O}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(I, PrbInvData) {TagArrayRead, TagArrayWrite} {
- prm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition({E, S}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInvData, I_C) {} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- // Needed for TCC-based protocols: must hold on to ownership until the transfer completes.
- transition({M, O}, LocalPrbInv, MO_PI){TagArrayRead, TagArrayWrite} {
- piu_sendProbeResponseInvUntransferredOwnership;
- pp_popProbeQueue;
- }
-
- // If there is a race and we see a probe invalidate, handle normally.
- transition(MO_PI, PrbInvData, I){TagArrayWrite} {
- pd_sendProbeResponseData;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(MO_PI, PrbInv, I){TagArrayWrite} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- // normal exit when ownership is successfully transferred
- transition(MO_PI, TCC_AckCtoD, I) {TagArrayWrite} {
- ic_invCache;
- pr_popResponseQueue;
- }
-
- transition({M, O, E, S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition({E, S, I}, LocalPrbInv, I){TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
-
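- // Shared probes downgrade M/E to O rather than invalidating; the owner
- // keeps supplying data.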
- transition({M, E, O}, PrbShrData, O) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition(MO_PI, PrbShrData) {DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
-
- transition(S, PrbShrData, S) {TagArrayRead, DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({I, I_C}, PrbShrData) {TagArrayRead} {
- prm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition({I_M, I_ES}, {PrbInv, PrbInvData}){TagArrayRead} {
- pi_sendProbeResponseInv;
- ic_invCache;
- a_allocate; // but make sure there is room for incoming data when it arrives
- pp_popProbeQueue;
- }
-
- transition({I_M, I_ES}, PrbShrData) {} {
- prm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(S_M, PrbInvData, I_M) {TagArrayRead} {
- pim_sendProbeResponseInvMs;
- ic_invCache;
- a_allocate;
- pp_popProbeQueue;
- }
-
- transition(O_M, PrbInvData, I_M) {TagArrayRead,DataArrayRead} {
- pdm_sendProbeResponseDataMs;
- ic_invCache;
- a_allocate;
- pp_popProbeQueue;
- }
-
- transition({S_M, O_M}, {PrbInv}, I_M) {TagArrayRead} {
- pim_sendProbeResponseInvMs;
- ic_invCache;
- a_allocate;
- pp_popProbeQueue;
- }
-
- transition(S_M, {LocalPrbInv}, I_M) {TagArrayRead} {
- pim_sendProbeResponseInvMs;
- ic_invCache;
- a_allocate;
- pp_popProbeQueue;
- }
-
- transition(O_M, LocalPrbInv, I_M) {TagArrayRead} {
- piu_sendProbeResponseInvUntransferredOwnership;
- ic_invCache;
- a_allocate;
- pp_popProbeQueue;
- }
-
- transition({S_M, O_M}, PrbShrData) {DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbInvData, I_C){
- pd_sendProbeResponseData;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInvData, I_C) {DataArrayRead} {
- pd_sendProbeResponseData;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInv, I_C) {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbInv, I_C) {
- pi_sendProbeResponseInv;
- ic_invCache;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbShrData, ES_I) {DataArrayRead} {
- pd_sendProbeResponseData;
- sf_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbShrData, MO_I) {DataArrayRead} {
- pd_sendProbeResponseData;
- sf_setSharedFlip;
- pp_popProbeQueue;
- }
-
-}
+++ /dev/null
-protocol "GPU_AMD_Base";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_AMD_Base-msg.sm";
-include "MOESI_AMD_Base-dir.sm";
-include "MOESI_AMD_Base-CorePair.sm";
-include "GPU_RfO-TCP.sm";
-include "GPU_RfO-SQC.sm";
-include "GPU_RfO-TCC.sm";
-include "GPU_RfO-TCCdir.sm";
-include "MOESI_AMD_Base-L3cache.sm";
-include "MOESI_AMD_Base-RegionBuffer.sm";
+++ /dev/null
-/*
- * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Blake Hechtman
- */
-
-machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
- : Sequencer* sequencer;
- CacheMemory * L1cache;
- int TCC_select_num_bits;
- Cycles issue_latency := 80; // time to send data down to TCC
- Cycles l2_hit_latency := 18; // for 1MB L2, 20 for 2MB
-
- MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";
-
- MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
- MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";
-
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
- I, AccessPermission:Invalid, desc="Invalid";
- V, AccessPermission:Read_Only, desc="Valid";
- }
-
- enumeration(Event, desc="SQC Events") {
- // Core initiated
- Fetch, desc="Fetch";
- // Mem sys initiated
- Repl, desc="Replacing block from cache";
- Data, desc="Received Data";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
-
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different from memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Shared, desc="Victim hit by shared probe";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
- // Internal functions
- Tick clockEdge();
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
- return cache_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes +
- functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return SQC_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return SQC_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(SQC_State_to_permission(state));
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- // Out Ports
-
- out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);
-
- // In Ports
-
- in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
- if (responseToSQC_in.isReady(clockEdge())) {
- peek(responseToSQC_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == CoherenceResponseType:TDSysResp) {
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.addr)) {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.addr);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
-
- assert(in_msg.Type == RubyRequestType:IFETCH);
- trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
- }
- }
- }
-
- // Actions
-
- action(ic_invCache, "ic", desc="invalidate cache") {
- if(is_valid(cache_entry)) {
- L1cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
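-
- // Illustrative numbers: with the usual 64 B Ruby block,
- // TCC_select_low_bit = RubySystem::getBlockSizeBits() = 6, so with,
- // say, TCC_select_num_bits = 3 the mapAddressToRange() call above uses
- // address bits [8:6] to pick one of eight TCC banks, interleaving
- // consecutive cache lines across the banks.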
-
- action(a_allocate, "a", desc="allocate block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1cache.allocate(address, new Entry));
- }
- }
-
- action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
- responseToSQC_in.dequeue(clockEdge());
- }
-
- action(l_loadDone, "l", desc="local load done") {
- assert(is_valid(cache_entry));
- sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
- APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
- }
-
- action(w_writeCache, "w", desc="write data to cache") {
- peek(responseToSQC_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := false;
- }
- }
-
- // Transitions
-
- // transitions from base
- transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} {
- ic_invCache;
- }
-
- transition(I, Data, V) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- a_allocate;
- w_writeCache;
- l_loadDone;
- pr_popResponseQueue;
- }
-
- transition(I, Fetch) {TagArrayRead, TagArrayWrite} {
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- // simple hit transitions
- transition(V, Fetch) {TagArrayRead, DataArrayRead} {
- l_loadDone;
- p_popMandatoryQueue;
- }
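-
- // In outline, this whole machine is the read-only loop
- //   I --Fetch--> (RdBlk to a TCC bank) --Data--> V --Repl--> I,
- // with V-state fetches completing locally. There are no stores and no
- // transient states, and note that probeToSQC has no in_port here: an
- // instruction cache never owns dirty data, so this machine never has
- // to answer probes.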
-}
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Blake Hechtman
- */
-
-machine(MachineType:TCC, "TCC Cache")
- : CacheMemory * L2cache;
- bool WB; /*is this cache Writeback?*/
- Cycles l2_request_latency := 50;
- Cycles l2_response_latency := 20;
-
- // From the TCPs or SQCs
- MessageBuffer * requestFromTCP, network="From", virtual_network="1", vnet_type="request";
- // To the Cores. TCC deals only with TCPs/SQCs.
- MessageBuffer * responseToCore, network="To", virtual_network="3", vnet_type="response";
- // From the NB
- MessageBuffer * probeFromNB, network="From", virtual_network="0", vnet_type="request";
- MessageBuffer * responseFromNB, network="From", virtual_network="2", vnet_type="response";
- // To the NB
- MessageBuffer * requestToNB, network="To", virtual_network="0", vnet_type="request";
- MessageBuffer * responseToNB, network="To", virtual_network="2", vnet_type="response";
- MessageBuffer * unblockToNB, network="To", virtual_network="4", vnet_type="unblock";
-
- MessageBuffer * triggerQueue;
-
-{
- // EVENTS
- enumeration(Event, desc="TCC Events") {
- // Requests coming from the Cores
- RdBlk, desc="RdBlk event";
- WrVicBlk, desc="L1 Write Through";
- WrVicBlkBack, desc="L1 Write Through (dirty cache)";
- Atomic, desc="Atomic Op";
- AtomicDone, desc="AtomicOps Complete";
- AtomicNotDone, desc="AtomicOps not Complete";
- Data, desc="data message";
- // Coming from this TCC
- L2_Repl, desc="L2 Replacement";
- // Probes
- PrbInv, desc="Invalidating probe";
- // Coming from Memory Controller
- WBAck, desc="writethrough ack from memory";
- }
-
- // STATES
- state_declaration(State, desc="TCC State", default="TCC_State_I") {
- M, AccessPermission:Read_Write, desc="Modified(dirty cache only)";
- W, AccessPermission:Read_Write, desc="Written(dirty cache only)";
- V, AccessPermission:Read_Only, desc="Valid";
- I, AccessPermission:Invalid, desc="Invalid";
- IV, AccessPermission:Busy, desc="Waiting for Data";
- WI, AccessPermission:Busy, desc="Waiting on Writethrough Ack";
- A, AccessPermission:Busy, desc="Invalid waiting on atomic Data";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
-
-
- // STRUCTURES
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different from memory)?";
- DataBlock DataBlk, desc="Data for the block";
- WriteMask writeMask, desc="Dirty byte mask";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, desc="Is the data dirty?";
- bool Shared, desc="Victim hit by shared probe";
- MachineID From, desc="Waiting for writeback from...";
- NetDest Destination, desc="Data destination";
- int numAtomics, desc="number of remaining atomics";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
-
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // FUNCTION DEFINITIONS
- Tick clockEdge();
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- return getCacheEntry(addr).DataBlk;
- }
-
- bool presentOrAvail(Addr addr) {
- return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes +
- functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return TCC_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return TCC_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(TCC_State_to_permission(state));
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
-
- // ** OUT_PORTS **
-
- // Three classes of ports
- // Class 1: downward facing network links to NB
- out_port(requestToNB_out, CPURequestMsg, requestToNB);
- out_port(responseToNB_out, ResponseMsg, responseToNB);
- out_port(unblockToNB_out, UnblockMsg, unblockToNB);
-
- // Class 2: upward facing ports to GPU cores
- out_port(responseToCore_out, ResponseMsg, responseToCore);
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
-
-// ** IN_PORTS **
- in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (tbe.numAtomics == 0) {
- trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
-
-
- in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
- if (responseFromNB_in.isReady(clockEdge())) {
- peek(responseFromNB_in, ResponseMsg, block_on="addr") {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:NBSysResp) {
- if(presentOrAvail(in_msg.addr)) {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.addr);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- // Finally handling incoming requests (from TCP) and probes (from NB).
- in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- }
- }
-
- in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
- if (coreRequestNetwork_in.isReady(clockEdge())) {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- if(WB) {
- if(presentOrAvail(in_msg.addr)) {
- trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.addr);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else {
- trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:Atomic) {
- trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
- // BEGIN ACTIONS
-
- action(i_invL2, "i", desc="invalidate TCC cache block") {
- if (is_valid(cache_entry)) {
- L2cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(sd_sendData, "sd", desc="send Shared response") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
-
- action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination := tbe.Destination;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- enqueue(unblockToNB_out, UnblockMsg, 1) {
- out_msg.addr := address;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
-
- action(rd_requestData, "r", desc="Miss in L2, pass on") {
- if (tbe.Destination.count() == 1) {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Shared := false; // unneeded for this request
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
- }
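-
- // Only the first requestor to miss (Destination.count() == 1, set just
- // beforehand in t_allocateTBE) forwards the miss to the directory;
- // later requestors to the same line are merely merged into
- // tbe.Destination, and sdr_sendDataResponse answers all of them at
- // once when the single NBSysResp returns.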
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(responseFromNB_in, ResponseMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Destination.clear();
- out_msg.Destination.add(in_msg.WTRequestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(swb_sendWBAck, "swb", desc="send WB Ack") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Destination.clear();
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
- peek(responseFromNB_in, ResponseMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Destination.add(in_msg.WTRequestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- }
- }
- }
-
- action(a_allocateBlock, "a", desc="allocate TCC block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L2cache.allocate(address, new Entry));
- cache_entry.writeMask.clear();
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- if (is_invalid(tbe)) {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.Destination.clear();
- tbe.numAtomics := 0;
- }
- if (coreRequestNetwork_in.isReady(clockEdge())) {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- if(in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic){
- tbe.Destination.add(in_msg.Requestor);
- }
- }
- }
- }
-
- action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
- tbe.Destination.clear();
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
- peek(responseFromNB_in, ResponseMsg) {
- cache_entry.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
- }
- }
-
- action(wdb_writeDirtyBytes, "wdb", desc="write data to TCC") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- cache_entry.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
- cache_entry.writeMask.orMask(in_msg.writeMask);
- DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
- }
- }
-
- action(wt_writeThrough, "wt", desc="write back data") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.Requestor;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:WriteThrough;
- out_msg.Dirty := true;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.writeMask.orMask(in_msg.writeMask);
- }
- }
- }
-
- action(wb_writeBack, "wb", desc="write back data") {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:WriteThrough;
- out_msg.Dirty := true;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.writeMask.orMask(cache_entry.writeMask);
- }
- }
-
- action(at_atomicThrough, "at", desc="write back data") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.Requestor;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:Atomic;
- out_msg.Dirty := true;
- out_msg.writeMask.orMask(in_msg.writeMask);
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseToNB_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
- L2cache.setMRU(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- coreRequestNetwork_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseFromNB_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(z_stall, "z", desc="stall") {
- // built-in
- }
-
-
- action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
- tbe.numAtomics := tbe.numAtomics + 1;
- }
-
-
- action(dna_decrementNumAtomics, "dna", desc="inc num atomics") {
- tbe.numAtomics := tbe.numAtomics - 1;
- if (tbe.numAtomics==0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AtomicDone;
- }
- }
- }
-
- action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
- triggerQueue_in.dequeue(clockEdge());
- }
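-
- // Atomic bookkeeping, in outline: each Atomic forwarded by
- // at_atomicThrough is paired with ina (numAtomics++); each atomic data
- // response runs dna (numAtomics--); when the count reaches zero, dna
- // enqueues TriggerType:AtomicDone, which triggerQueue_in turns into
- // Event:AtomicDone and retires the A-state entry (A -> I).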
-
- // END ACTIONS
-
- // BEGIN TRANSITIONS
- // transitions from base
- // Assumptions for ArrayRead/Write
- // TBE checked before tags
- // Data Read/Write requires Tag Read
-
- // Stalling transitions do NOT check the tag array...and if they do,
- // they can cause a resource stall deadlock!
-
- transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) { //TagArrayRead} {
- z_stall;
- }
- transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) { //TagArrayRead} {
- z_stall;
- }
- transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) { //TagArrayRead} {
- z_stall;
- }
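-
- // Roughly why: a stall re-arbitrates every cycle it is blocked, so if
- // it also claimed TagArrayRead it could hold the tag array busy while
- // the very transition that must fire to leave the transient state
- // (e.g. WBAck for WI) waits on that same array -- a resource deadlock.
- // Hence the z_stall transitions above claim no array resources.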
- transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
- sd_sendData;
- ut_updateTag;
- p_popRequestQueue;
- }
- transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- wb_writeBack;
- }
-
- transition(I, RdBlk, IV) {TagArrayRead} {
- t_allocateTBE;
- rd_requestData;
- p_popRequestQueue;
- }
-
- transition(IV, RdBlk) {
- t_allocateTBE;
- rd_requestData;
- p_popRequestQueue;
- }
-
- transition({V, I}, Atomic, A) {TagArrayRead} {
- i_invL2;
- t_allocateTBE;
- at_atomicThrough;
- ina_incrementNumAtomics;
- p_popRequestQueue;
- }
-
- transition(A, Atomic) {
- at_atomicThrough;
- ina_incrementNumAtomics;
- p_popRequestQueue;
- }
-
- transition({M, W}, Atomic, WI) {TagArrayRead} {
- t_allocateTBE;
- wb_writeBack;
- }
-
- transition(I, WrVicBlk) {TagArrayRead} {
- wt_writeThrough;
- p_popRequestQueue;
- }
-
- transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
- ut_updateTag;
- wdb_writeDirtyBytes;
- wt_writeThrough;
- p_popRequestQueue;
- }
-
- transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- ut_updateTag;
- swb_sendWBAck;
- wdb_writeDirtyBytes;
- p_popRequestQueue;
- }
-
- transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- ut_updateTag;
- swb_sendWBAck;
- wdb_writeDirtyBytes;
- p_popRequestQueue;
- }
-
- transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocateBlock;
- ut_updateTag;
- swb_sendWBAck;
- wdb_writeDirtyBytes;
- p_popRequestQueue;
- }
-
- transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- wb_writeBack;
- i_invL2;
- }
-
- transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
- i_invL2;
- }
-
- transition({A, IV, WI}, L2_Repl) {
- i_invL2;
- }
-
- transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(W, PrbInv) {TagArrayRead} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({A, IV, WI}, PrbInv) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocateBlock;
- ut_updateTag;
- wcb_writeCacheBlock;
- sdr_sendDataResponse;
- pr_popResponseQueue;
- dt_deallocateTBE;
- }
-
- transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocateBlock;
- ar_sendAtomicResponse;
- dna_decrementNumAtomics;
- pr_popResponseQueue;
- }
-
- transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
- dt_deallocateTBE;
- ptr_popTriggerQueue;
- }
-
- transition(A, AtomicNotDone) {TagArrayRead} {
- ptr_popTriggerQueue;
- }
-
- //M,W should not see WBAck as the cache is in WB mode
- //WBAcks do not need to check tags
- transition({I, V, IV, A}, WBAck) {
- w_sendResponseWBAck;
- pr_popResponseQueue;
- }
-
- transition(WI, WBAck, I) {
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Blake Hechtman
- */
-
-machine(MachineType:TCP, "GPU TCP (L1 Data Cache)")
- : VIPERCoalescer* coalescer;
- Sequencer* sequencer;
- bool use_seq_not_coal;
- CacheMemory * L1cache;
- bool WB; /*is this cache Writeback?*/
- bool disableL1; /* bypass L1 cache? */
- int TCC_select_num_bits;
- Cycles issue_latency := 40; // time to send data down to TCC
- Cycles l2_hit_latency := 18;
-
- MessageBuffer * requestFromTCP, network="To", virtual_network="1", vnet_type="request";
- MessageBuffer * responseFromTCP, network="To", virtual_network="3", vnet_type="response";
- MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
-
- MessageBuffer * probeToTCP, network="From", virtual_network="1", vnet_type="request";
- MessageBuffer * responseToTCP, network="From", virtual_network="3", vnet_type="response";
- MessageBuffer * mandatoryQueue;
-
-{
- state_declaration(State, desc="TCP Cache States", default="TCP_State_I") {
- I, AccessPermission:Invalid, desc="Invalid";
- V, AccessPermission:Read_Only, desc="Valid";
- W, AccessPermission:Read_Write, desc="Written";
- M, AccessPermission:Read_Write, desc="Written and Valid";
- L, AccessPermission:Read_Write, desc="Local access is modifable";
- A, AccessPermission:Invalid, desc="Waiting on Atomic";
- }
-
- enumeration(Event, desc="TCP Events") {
- // Core initiated
- Load, desc="Load";
- Store, desc="Store to L1 (L1 is dirty)";
- StoreThrough, desc="Store directly to L2 (L1 is clean)";
- StoreLocal, desc="Store to L1 but L1 is clean";
- Atomic, desc="Atomic";
- Flush, desc="Flush if dirty (wbL1 for Store Release)";
- Evict, desc="Evict if clean (invL1 for Load Acquire)";
- // Mem sys initiated
- Repl, desc="Replacing block from cache";
-
- // TCC initiated
- TCC_Ack, desc="TCC Ack to Core Request";
- TCC_AckWB, desc="TCC Ack for WB";
- // Disable L1 cache
- Bypass, desc="Bypass the entire L1 cache";
- }
-
- enumeration(RequestType,
- desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- TagArrayFlash, desc="Flash clear the data array";
- }
-
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different from memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- WriteMask writeMask, desc="written bytes masks";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Shared, desc="Victim hit by shared probe";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<TCP_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
- int WTcnt, default="0";
- int Fcnt, default="0";
- bool inFlush, default="false";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
- // Internal functions
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
- return cache_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes +
- functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return TCP_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return TCP_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- bool isValid(Addr addr) {
- AccessPermission perm := getAccessPermission(addr);
- if (perm == AccessPermission:NotPresent ||
- perm == AccessPermission:Invalid ||
- perm == AccessPermission:Busy) {
- return false;
- } else {
- return true;
- }
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(TCP_State_to_permission(state));
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayFlash) {
- L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayFlash) {
- // FIXME should check once per cache, rather than once per cacheline
- return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- // Out Ports
-
- out_port(requestNetwork_out, CPURequestMsg, requestFromTCP);
-
- // In Ports
-
- in_port(responseToTCP_in, ResponseMsg, responseToTCP) {
- if (responseToTCP_in.isReady(clockEdge())) {
- peek(responseToTCP_in, ResponseMsg, block_on="addr") {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:TDSysResp) {
- // disable L1 cache
- if (disableL1) {
- trigger(Event:Bypass, in_msg.addr, cache_entry, tbe);
- } else {
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.addr)) {
- trigger(Event:TCC_Ack, in_msg.addr, cache_entry, tbe);
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.addr);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck ||
- in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
- DPRINTF(RubySlicc, "%s\n", in_msg);
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:Load, in_msg.LineAddress, cache_entry, tbe);
- } else if (in_msg.Type == RubyRequestType:ATOMIC) {
- trigger(Event:Atomic, in_msg.LineAddress, cache_entry, tbe);
- } else if (in_msg.Type == RubyRequestType:ST) {
- if(disableL1) {
- trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
- } else {
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
- if (in_msg.segment == HSASegment:SPILL) {
- trigger(Event:StoreLocal, in_msg.LineAddress, cache_entry, tbe);
- } else if (WB) {
- trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
- } else {
- trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
- }
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } // end if (disableL1)
- } else if (in_msg.Type == RubyRequestType:FLUSH) {
- trigger(Event:Flush, in_msg.LineAddress, cache_entry, tbe);
- } else if (in_msg.Type == RubyRequestType:REPLACEMENT){
- trigger(Event:Evict, in_msg.LineAddress, cache_entry, tbe);
- } else {
- error("Unexpected Request Message from VIC");
- if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
- if (WB) {
- trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
- } else {
- trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
- }
- } else {
- Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- }
- }
- }
-
- // Actions
-
- action(ic_invCache, "ic", desc="invalidate cache") {
- if(is_valid(cache_entry)) {
- cache_entry.writeMask.clear();
- L1cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(n_issueRdBlk, "n", desc="Issue RdBlk") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(rb_bypassDone, "rb", desc="bypass L1 of read access") {
- peek(responseToTCP_in, ResponseMsg) {
- DataBlock tmp := in_msg.DataBlk;
- if (use_seq_not_coal) {
- sequencer.readCallback(address, tmp, false, MachineType:L1Cache);
- } else {
- coalescer.readCallback(address, MachineType:L1Cache, tmp);
- }
- if(is_valid(cache_entry)) {
- unset_cache_entry();
- }
- }
- }
-
- action(wab_bypassDone, "wab", desc="bypass L1 of write access") {
- peek(responseToTCP_in, ResponseMsg) {
- DataBlock tmp := in_msg.DataBlk;
- if (use_seq_not_coal) {
- sequencer.writeCallback(address, tmp, false, MachineType:L1Cache);
- } else {
- coalescer.writeCallback(address, MachineType:L1Cache, tmp);
- }
- }
- }
-
- action(norl_issueRdBlkOrloadDone, "norl", desc="local load done") {
- peek(mandatoryQueue_in, RubyRequest){
- if (cache_entry.writeMask.cmpMask(in_msg.writeMask)) {
- if (use_seq_not_coal) {
- sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
- } else {
- coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
- }
- } else {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
- }
- }
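-
- // The cmpMask() call above is a covering test: if every byte this load
- // needs was already written locally, the load completes out of the
- // dirty bytes without leaving the TCP; otherwise the full line is
- // fetched from the owning TCC bank and merged under the same mask
- // later in w_writeCache.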
-
- action(wt_writeThrough, "wt", desc="Flush dirty data") {
- WTcnt := WTcnt + 1;
- APPEND_TRANSITION_COMMENT("write++ = ");
- APPEND_TRANSITION_COMMENT(WTcnt);
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- assert(is_valid(cache_entry));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.writeMask.clear();
- out_msg.writeMask.orMask(cache_entry.writeMask);
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:WriteThrough;
- out_msg.InitialRequestTime := curCycle();
- out_msg.Shared := false;
- }
- }
-
- action(at_atomicThrough, "at", desc="send Atomic") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.writeMask.clear();
- out_msg.writeMask.orMask(in_msg.writeMask);
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:Atomic;
- out_msg.InitialRequestTime := curCycle();
- out_msg.Shared := false;
- }
- }
- }
-
- action(a_allocate, "a", desc="allocate block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1cache.allocate(address, new Entry));
- }
- cache_entry.writeMask.clear();
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- }
-
- action(d_deallocateTBE, "d", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(sf_setFlush, "sf", desc="set flush") {
- inFlush := true;
- APPEND_TRANSITION_COMMENT(" inFlush is true");
- }
-
- action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
- responseToTCP_in.dequeue(clockEdge());
- }
-
- action(l_loadDone, "l", desc="local load done") {
- assert(is_valid(cache_entry));
- if (use_seq_not_coal) {
- sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
- } else {
- coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
- }
- }
-
- action(s_storeDone, "s", desc="local store done") {
- assert(is_valid(cache_entry));
-
- if (use_seq_not_coal) {
- sequencer.writeCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
- } else {
- coalescer.writeCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
- }
- cache_entry.Dirty := true;
- }
-
- action(inv_invDone, "inv", desc="local inv done") {
- if (use_seq_not_coal) {
- DPRINTF(RubySlicc, "Sequencer does not define invCallback!\n");
- assert(false);
- } else {
- coalescer.invCallback(address);
- }
- }
-
- action(wb_wbDone, "wb", desc="local wb done") {
- if (inFlush == true) {
- Fcnt := Fcnt + 1;
- if (Fcnt > WTcnt) {
- if (use_seq_not_coal) {
- DPRINTF(RubySlicc, "Sequencer does not define wbCallback!\n");
- assert(false);
- } else {
- coalescer.wbCallback(address);
- }
- Fcnt := Fcnt - 1;
- }
- if (WTcnt == 0 && Fcnt == 0) {
- inFlush := false;
- APPEND_TRANSITION_COMMENT(" inFlush is false");
- }
- }
- }
-
- action(wd_wtDone, "wd", desc="writethrough done") {
- WTcnt := WTcnt - 1;
- if (inFlush == true) {
- Fcnt := Fcnt -1;
- }
- assert(WTcnt >= 0);
- APPEND_TRANSITION_COMMENT("write-- = ");
- APPEND_TRANSITION_COMMENT(WTcnt);
- }
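-
- // Flush accounting, in outline: wt_writeThrough does WTcnt++ per
- // write-through issued and wd_wtDone does WTcnt-- per ack; sf_setFlush
- // arms inFlush, after which wb_wbDone counts completions in Fcnt and
- // only signals coalescer.wbCallback once the acks have drained,
- // clearing inFlush when WTcnt and Fcnt both reach zero.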
-
- action(dw_dirtyWrite, "dw", desc="update write mask"){
- peek(mandatoryQueue_in, RubyRequest) {
- cache_entry.DataBlk.copyPartial(in_msg.WTData,in_msg.writeMask);
- cache_entry.writeMask.orMask(in_msg.writeMask);
- }
- }
- action(w_writeCache, "w", desc="write data to cache") {
- peek(responseToTCP_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- DataBlock tmp := in_msg.DataBlk;
- tmp.copyPartial(cache_entry.DataBlk,cache_entry.writeMask);
- cache_entry.DataBlk := tmp;
- }
- }
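-
- // Note the merge direction: the incoming TCC data is the base copy,
- // and copyPartial() overlays this TCP's locally dirty bytes
- // (cache_entry.writeMask) on top, so stores issued while the miss was
- // outstanding survive the fill.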
-
- action(mru_updateMRU, "mru", desc="Touch block for replacement policy") {
- L1cache.setMRU(address);
- }
-
-// action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
-// mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
-// }
-
- action(z_stall, "z", desc="stall; built-in") {
- // built-int action
- }
-
- // Transitions
- // ArrayRead/Write assumptions:
- // All requests read Tag Array
- // TBE allocation writes the TagArray to I
- // TBE only checked on misses
- // Stores will also write dirty bits in the tag
- // WriteThroughs still need to use cache entry as staging buffer for wavefront
-
- // Stalling transitions do NOT check the tag array...and if they do,
- // they can cause a resource stall deadlock!
-
- transition({A}, {Load, Store, Atomic, StoreThrough}) { //TagArrayRead} {
- z_stall;
- }
-
- transition({M, V, L}, Load) {TagArrayRead, DataArrayRead} {
- l_loadDone;
- mru_updateMRU;
- p_popMandatoryQueue;
- }
-
- transition(I, Load) {TagArrayRead} {
- n_issueRdBlk;
- p_popMandatoryQueue;
- }
-
- transition({V, I}, Atomic, A) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- mru_updateMRU;
- at_atomicThrough;
- p_popMandatoryQueue;
- }
-
- transition({M, W}, Atomic, A) {TagArrayRead, TagArrayWrite} {
- wt_writeThrough;
- t_allocateTBE;
- at_atomicThrough;
- ic_invCache;
- }
-
- transition(W, Load, I) {TagArrayRead, DataArrayRead} {
- wt_writeThrough;
- norl_issueRdBlkOrloadDone;
- p_popMandatoryQueue;
- }
-
- transition({I}, StoreLocal, L) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocate;
- dw_dirtyWrite;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- transition({L, V}, StoreLocal, L) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- dw_dirtyWrite;
- mru_updateMRU;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(I, Store, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocate;
- dw_dirtyWrite;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(V, Store, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- dw_dirtyWrite;
- mru_updateMRU;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- transition({M, W}, Store) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- dw_dirtyWrite;
- mru_updateMRU;
- s_storeDone;
- p_popMandatoryQueue;
- }
-
- //M,W should not see storeThrough
- transition(I, StoreThrough) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocate;
- dw_dirtyWrite;
- s_storeDone;
- wt_writeThrough;
- ic_invCache;
- p_popMandatoryQueue;
- }
-
- transition({V,L}, StoreThrough, I) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- dw_dirtyWrite;
- s_storeDone;
- wt_writeThrough;
- ic_invCache;
- p_popMandatoryQueue;
- }
-
- transition(I, TCC_Ack, V) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} {
- a_allocate;
- w_writeCache;
- l_loadDone;
- pr_popResponseQueue;
- }
-
- transition(I, Bypass, I) {
- rb_bypassDone;
- pr_popResponseQueue;
- }
-
- transition(A, Bypass, I){
- d_deallocateTBE;
- wab_bypassDone;
- pr_popResponseQueue;
- }
-
- transition(A, TCC_Ack, I) {TagArrayRead, DataArrayRead, DataArrayWrite} {
- d_deallocateTBE;
- a_allocate;
- w_writeCache;
- s_storeDone;
- pr_popResponseQueue;
- ic_invCache;
- }
-
- transition(V, TCC_Ack, V) {TagArrayRead, DataArrayRead, DataArrayWrite} {
- w_writeCache;
- l_loadDone;
- pr_popResponseQueue;
- }
-
- transition({W, M}, TCC_Ack, M) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} {
- w_writeCache;
- l_loadDone;
- pr_popResponseQueue;
- }
-
- transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} {
- ic_invCache;
- }
-
- transition({A}, Repl) {TagArrayRead, TagArrayWrite} {
- ic_invCache;
- }
-
- transition({W, M}, Repl, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- wt_writeThrough;
- ic_invCache;
- }
-
- transition(L, Repl, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- wt_writeThrough;
- ic_invCache;
- }
-
- transition({W, M}, Flush, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- sf_setFlush;
- wt_writeThrough;
- ic_invCache;
- p_popMandatoryQueue;
- }
-
- transition({V, I, A, L}, Flush) {TagArrayFlash} {
- sf_setFlush;
- wb_wbDone;
- p_popMandatoryQueue;
- }
-
- transition({I, V}, Evict, I) {TagArrayFlash} {
- inv_invDone;
- p_popMandatoryQueue;
- ic_invCache;
- }
-
- transition({W, M}, Evict, W) {TagArrayFlash} {
- inv_invDone;
- p_popMandatoryQueue;
- }
-
- transition({A, L}, Evict) {TagArrayFlash} {
- inv_invDone;
- p_popMandatoryQueue;
- }
-
- // TCC_AckWB only snoops TBE
- transition({V, I, A, M, W, L}, TCC_AckWB) {
- wd_wtDone;
- wb_wbDone;
- pr_popResponseQueue;
- }
-}
+++ /dev/null
-protocol "GPU_VIPER";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_AMD_Base-msg.sm";
-include "MOESI_AMD_Base-dir.sm";
-include "MOESI_AMD_Base-CorePair.sm";
-include "GPU_VIPER-TCP.sm";
-include "GPU_VIPER-SQC.sm";
-include "GPU_VIPER-TCC.sm";
-include "MOESI_AMD_Base-L3cache.sm";
+++ /dev/null
-protocol "GPU_VIPER";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_AMD_Base-msg.sm";
-include "MOESI_AMD_Base-probeFilter.sm";
-include "MOESI_AMD_Base-CorePair.sm";
-include "GPU_VIPER-TCP.sm";
-include "GPU_VIPER-SQC.sm";
-include "GPU_VIPER-TCC.sm";
-include "MOESI_AMD_Base-L3cache.sm";
+++ /dev/null
-/*
- * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Author: Sooraj Puthoor, Blake Hechtman
- */
-
-/*
- * This file is inherited from GPU_VIPER-TCC.sm and retains its structure.
- * There are very few modifications in this file relative to the original VIPER TCC.
- */
-
-machine(MachineType:TCC, "TCC Cache")
- : CacheMemory * L2cache;
- bool WB; /*is this cache Writeback?*/
- int regionBufferNum;
- Cycles l2_request_latency := 50;
- Cycles l2_response_latency := 20;
-
- // From the TCPs or SQCs
- MessageBuffer * requestFromTCP, network="From", virtual_network="1", ordered="true", vnet_type="request";
- // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
- MessageBuffer * responseToCore, network="To", virtual_network="3", ordered="true", vnet_type="response";
- // From the NB
- MessageBuffer * probeFromNB, network="From", virtual_network="0", ordered="false", vnet_type="request";
- MessageBuffer * responseFromNB, network="From", virtual_network="2", ordered="false", vnet_type="response";
- // To the NB
- MessageBuffer * requestToNB, network="To", virtual_network="0", ordered="false", vnet_type="request";
- MessageBuffer * responseToNB, network="To", virtual_network="2", ordered="false", vnet_type="response";
- MessageBuffer * unblockToNB, network="To", virtual_network="4", ordered="false", vnet_type="unblock";
-
- MessageBuffer * triggerQueue, ordered="true", random="false";
-{
- // EVENTS
- enumeration(Event, desc="TCC Events") {
- // Requests coming from the Cores
- RdBlk, desc="RdBlk event";
- WrVicBlk, desc="L1 Write Through";
- WrVicBlkBack, desc="L1 Write Back (dirty cache)";
- Atomic, desc="Atomic Op";
- AtomicDone, desc="AtomicOps Complete";
- AtomicNotDone, desc="AtomicOps not Complete";
- Data, desc="data messgae";
- // Coming from this TCC
- L2_Repl, desc="L2 Replacement";
- // Probes
- PrbInv, desc="Invalidating probe";
- // Coming from Memory Controller
- WBAck, desc="writethrough ack from memory";
- }
-
- // STATES
- state_declaration(State, desc="TCC State", default="TCC_State_I") {
- M, AccessPermission:Read_Write, desc="Modified(dirty cache only)";
- W, AccessPermission:Read_Write, desc="Written(dirty cache only)";
- V, AccessPermission:Read_Only, desc="Valid";
- I, AccessPermission:Invalid, desc="Invalid";
- IV, AccessPermission:Busy, desc="Waiting for Data";
- WI, AccessPermission:Busy, desc="Waiting on Writethrough Ack";
- A, AccessPermission:Busy, desc="Invalid waiting on atomic Data";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the data array";
- TagArrayWrite, desc="Write the data array";
- }
-
-
- // STRUCTURES
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (diff from memory?)";
- DataBlock DataBlk, desc="Data for the block";
- WriteMask writeMask, desc="Dirty byte mask";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, desc="Is the data dirty?";
- bool Shared, desc="Victim hit by shared probe";
- MachineID From, desc="Waiting for writeback from...";
- NetDest Destination, desc="Data destination";
- int numAtomics, desc="number remaining atomics";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
-
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // FUNCTION DEFINITIONS
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
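- // getPeer ignores its argument: the peer is always this TCC's
- // RegionBuffer, selected by the regionBufferNum parameter.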
- MachineID getPeer(MachineID mach) {
- return createMachineID(MachineType:RegionBuffer, intToID(regionBufferNum));
- }
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- return getCacheEntry(addr).DataBlk;
- }
-
- bool presentOrAvail(Addr addr) {
- return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes +
- functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return TCC_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return TCC_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(TCC_State_to_permission(state));
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- L2cache.recordRequestType(CacheRequestType:DataArrayRead,addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:DataArrayWrite,addr);
- } else if (request_type == RequestType:TagArrayRead) {
- L2cache.recordRequestType(CacheRequestType:TagArrayRead,addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:TagArrayWrite,addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
-
- // ** OUT_PORTS **
-
- // Three classes of ports
- // Class 1: downward facing network links to NB
- out_port(requestToNB_out, CPURequestMsg, requestToNB);
- out_port(responseToNB_out, ResponseMsg, responseToNB);
- out_port(unblockToNB_out, UnblockMsg, unblockToNB);
-
- // Class 2: upward facing ports to GPU cores
- out_port(responseToCore_out, ResponseMsg, responseToCore);
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
-
-// ** IN_PORTS **
- in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (tbe.numAtomics == 0) {
- trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
-
-
- in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
- if (responseFromNB_in.isReady(clockEdge())) {
- peek(responseFromNB_in, ResponseMsg, block_on="addr") {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:NBSysResp) {
- if(presentOrAvail(in_msg.addr)) {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.addr);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- // Finally handling incoming requests (from TCP) and probes (from NB).
-
- in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- DPRINTF(RubySlicc, "machineID: %s\n", machineID);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- }
- }
-
-
- in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
- if (coreRequestNetwork_in.isReady(clockEdge())) {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- if(WB) {
- if(presentOrAvail(in_msg.addr)) {
- trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.addr);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else {
- trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:Atomic) {
- trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
- // BEGIN ACTIONS
-
- action(i_invL2, "i", desc="invalidate TCC cache block") {
- if (is_valid(cache_entry)) {
- L2cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- // Data available at TCC. Send the DATA to TCP
- action(sd_sendData, "sd", desc="send Shared response") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
-
- // Data was not available at TCC. So, TCC forwarded the request to
- // directory and directory responded back with data. Now, forward the
- // DATA to TCP and send the unblock ack back to directory.
- action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination := tbe.Destination;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- enqueue(unblockToNB_out, UnblockMsg, 1) {
- out_msg.addr := address;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
-
- action(rd_requestData, "r", desc="Miss in L2, pass on") {
- if(tbe.Destination.count()==1){
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.Shared := false; // unneeded for this request
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
- }
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(responseFromNB_in, ResponseMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Destination.clear();
- out_msg.Destination.add(in_msg.WTRequestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(swb_sendWBAck, "swb", desc="send WB Ack") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysWBAck;
- out_msg.Destination.clear();
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
- peek(responseFromNB_in, ResponseMsg) {
- enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:TDSysResp;
- out_msg.Destination.add(in_msg.WTRequestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- }
- }
- }
- action(sd2rb_sendDone2RegionBuffer, "sd2rb", desc="Request finished, send done ack") {
- enqueue(unblockToNB_out, UnblockMsg, 1) {
- out_msg.addr := address;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.DoneAck := true;
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else {
- out_msg.Dirty := false;
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(a_allocateBlock, "a", desc="allocate TCC block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L2cache.allocate(address, new Entry));
- cache_entry.writeMask.clear();
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- if (is_invalid(tbe)) {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.Destination.clear();
- tbe.numAtomics := 0;
- }
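- // For RdBlk and Atomic requests, record the requestor so the eventual
- // data response can be routed back (see sdr_sendDataResponse).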
- if (coreRequestNetwork_in.isReady(clockEdge())) {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- if(in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic){
- tbe.Destination.add(in_msg.Requestor);
- }
- }
- }
- }
-
- action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
- tbe.Destination.clear();
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
- peek(responseFromNB_in, ResponseMsg) {
- cache_entry.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
- }
- }
-
- action(wdb_writeDirtyBytes, "wdb", desc="write data to TCC") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- cache_entry.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
- cache_entry.writeMask.orMask(in_msg.writeMask);
- DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
- }
- }
-
- action(wt_writeThrough, "wt", desc="write through data") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.Requestor;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:WriteThrough;
- out_msg.Dirty := true;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.writeMask.orMask(in_msg.writeMask);
- }
- }
- }
-
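- // Unlike wt_writeThrough, which forwards the incoming request's data and
- // mask, wb_writeBack flushes the cache entry's own dirty bytes using the
- // write mask stored with the block.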
- action(wb_writeBack, "wb", desc="write back data") {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:WriteThrough;
- out_msg.Dirty := true;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.writeMask.orMask(cache_entry.writeMask);
- }
- }
-
- action(at_atomicThrough, "at", desc="write back data") {
- peek(coreRequestNetwork_in, CPURequestMsg) {
- enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.Requestor;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Data;
- out_msg.Type := CoherenceRequestType:Atomic;
- out_msg.Dirty := true;
- out_msg.writeMask.orMask(in_msg.writeMask);
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseToNB_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
- L2cache.setMRU(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- coreRequestNetwork_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseFromNB_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
- action(zz_recycleRequestQueue, "z", desc="stall") {
- coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
-
- action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
- tbe.numAtomics := tbe.numAtomics + 1;
- }
-
-
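- // When the count of outstanding atomics reaches zero, enqueue a local
- // trigger so the AtomicDone event can fire (see triggerQueue_in).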
- action(dna_decrementNumAtomics, "dna", desc="dec num atomics") {
- tbe.numAtomics := tbe.numAtomics - 1;
- if (tbe.numAtomics==0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AtomicDone;
- }
- }
- }
-
- action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- // END ACTIONS
-
- // BEGIN TRANSITIONS
- // transitions from base
- // Assumptions for ArrayRead/Write
- // TBE checked before tags
- // Data Read/Write requires Tag Read
-
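- // Requests that arrive while a writeback (WI), atomic (A), or fill (IV)
- // is in flight are recycled until the transient state resolves.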
- transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
- zz_recycleRequestQueue;
- }
- transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) {TagArrayRead} {
- zz_recycleRequestQueue;
- }
- transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
- zz_recycleRequestQueue;
- }
- transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
- sd_sendData;
- ut_updateTag;
- p_popRequestQueue;
- }
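- // Note: the RdBlk is deliberately not popped here; it is recycled in WI
- // and serviced once the writeback completes (WI sees WBAck, then I sees
- // the RdBlk again).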
- transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- wb_writeBack;
- }
-
- transition(I, RdBlk, IV) {TagArrayRead} {
- t_allocateTBE;
- rd_requestData;
- p_popRequestQueue;
- }
-
- transition(IV, RdBlk) {
- t_allocateTBE;
- rd_requestData;
- p_popRequestQueue;
- }
-
- transition({V, I}, Atomic, A) {TagArrayRead} {
- i_invL2;
- t_allocateTBE;
- at_atomicThrough;
- ina_incrementNumAtomics;
- p_popRequestQueue;
- }
-
- transition(A, Atomic) {
- at_atomicThrough;
- ina_incrementNumAtomics;
- p_popRequestQueue;
- }
-
- transition({M, W}, Atomic, WI) {TagArrayRead} {
- t_allocateTBE;
- wb_writeBack;
- }
-
- // Cache block stays in I state, which implies
- // this TCC is a write-no-allocate cache
- transition(I, WrVicBlk) {TagArrayRead} {
- wt_writeThrough;
- p_popRequestQueue;
- }
-
- transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
- ut_updateTag;
- wdb_writeDirtyBytes;
- wt_writeThrough;
- p_popRequestQueue;
- }
-
- transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- ut_updateTag;
- swb_sendWBAck;
- wdb_writeDirtyBytes;
- p_popRequestQueue;
- }
-
- transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- ut_updateTag;
- swb_sendWBAck;
- wdb_writeDirtyBytes;
- p_popRequestQueue;
- }
-
- transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocateBlock;
- ut_updateTag;
- swb_sendWBAck;
- wdb_writeDirtyBytes;
- p_popRequestQueue;
- }
-
- transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
- t_allocateTBE;
- wb_writeBack;
- i_invL2;
- }
-
- transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
- i_invL2;
- }
-
- transition({A, IV, WI}, L2_Repl) {
- i_invL2;
- }
-
- transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(W, PrbInv) {TagArrayRead} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition({A, IV, WI}, PrbInv) {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocateBlock;
- ut_updateTag;
- wcb_writeCacheBlock;
- sdr_sendDataResponse;
- sd2rb_sendDone2RegionBuffer;
- pr_popResponseQueue;
- dt_deallocateTBE;
- }
-
- transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
- a_allocateBlock;
- ar_sendAtomicResponse;
- sd2rb_sendDone2RegionBuffer;
- dna_decrementNumAtomics;
- pr_popResponseQueue;
- }
-
- transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
- dt_deallocateTBE;
- ptr_popTriggerQueue;
- }
-
- transition(A, AtomicNotDone) {TagArrayRead} {
- ptr_popTriggerQueue;
- }
-
- //M,W should not see WBAck as the cache is in WB mode
- //WBAcks do not need to check tags
- transition({I, V, IV, A}, WBAck) {
- w_sendResponseWBAck;
- sd2rb_sendDone2RegionBuffer;
- pr_popResponseQueue;
- }
-
- transition(WI, WBAck, I) {
- sd2rb_sendDone2RegionBuffer;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-}
+++ /dev/null
-protocol "GPU_VIPER_Region";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_AMD_Base-msg.sm";
-include "MOESI_AMD_Base-Region-CorePair.sm";
-include "MOESI_AMD_Base-L3cache.sm";
-include "MOESI_AMD_Base-Region-dir.sm";
-include "GPU_VIPER_Region-TCC.sm";
-include "GPU_VIPER-TCP.sm";
-include "GPU_VIPER-SQC.sm";
-include "MOESI_AMD_Base-RegionDir.sm";
-include "MOESI_AMD_Base-RegionBuffer.sm";
+++ /dev/null
-/*
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Authors: Brad Beckmann
- * Tushar Krishna
- */
-
-
-machine(MachineType:L1Cache, "Garnet_standalone L1 Cache")
- : Sequencer * sequencer;
- Cycles issue_latency := 2;
-
- // NETWORK BUFFERS
- MessageBuffer * requestFromCache, network="To", virtual_network="0",
- vnet_type = "request";
- MessageBuffer * forwardFromCache, network="To", virtual_network="1",
- vnet_type = "forward";
- MessageBuffer * responseFromCache, network="To", virtual_network="2",
- vnet_type = "response";
-
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- I, AccessPermission:Invalid, desc="Not Present/Invalid";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // From processor
- Request, desc="Request from Garnet_standalone";
- Forward, desc="Forward from Garnet_standalone";
- Response, desc="Response from Garnet_standalone";
- }
-
- // STRUCTURE DEFINITIONS
- DataBlock dummyData;
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="Data in the block";
- }
-
- // FUNCTIONS
- Tick clockEdge();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // cpu/testers/networktest/networktest.cc generates packets of the type
- // ReadReq, INST_FETCH, and WriteReq.
- // These are converted to LD, IFETCH and ST by mem/ruby/system/RubyPort.cc.
- // These are then sent to the sequencer, which sends them here.
- // Garnet_standalone-cache.sm tags LD, IFETCH and ST as Request, Forward,
- // and Response Events respectively, which are then injected into
- // virtual networks 0, 1 and 2 respectively.
- // This models traffic of different types within the network.
- //
- // Note that requests and forwards are MessageSizeType:Control,
- // while responses are MessageSizeType:Data.
- //
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Request;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Forward;
- } else if (type == RubyRequestType:ST) {
- return Event:Response;
- } else {
- error("Invalid RubyRequestType");
- }
- }
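- // Example: a LD to address A becomes Event:Request; the (I, Request)
- // transition then injects a Control-sized message into vnet0 via
- // a_issueRequest.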
-
-
- State getState(Entry cache_entry, Addr addr) {
- return State:I;
- }
-
- void setState(Entry cache_entry, Addr addr, State state) {
-
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- }
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return OOD;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("Garnet_standalone does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("Garnet_standalone does not support functional write.");
- }
-
- // NETWORK PORTS
-
- out_port(requestNetwork_out, RequestMsg, requestFromCache);
- out_port(forwardNetwork_out, RequestMsg, forwardFromCache);
- out_port(responseNetwork_out, RequestMsg, responseFromCache);
-
- // Mandatory Queue
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest) {
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
- }
- }
- }
-
- // ACTIONS
-
- // The destination directory of a packet is embedded in its address;
- // mapAddressToMachine is used to retrieve it.
-
- action(a_issueRequest, "a", desc="Issue a request") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MSG;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
-
- // To send broadcasts in vnet0 (to emulate broadcast-based protocols),
- // replace the above line by the following:
- // out_msg.Destination := broadcast(MachineType:Directory);
-
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
-
- action(b_issueForward, "b", desc="Issue a forward") {
- enqueue(forwardNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MSG;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
-
- action(c_issueResponse, "c", desc="Issue a response") {
- enqueue(responseNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MSG;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Data;
- }
- }
-
- action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
- sequencer.readCallback(address, dummyData);
- }
-
- action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
- sequencer.writeCallback(address, dummyData);
- }
-
-
- // TRANSITIONS
-
- // sequencer hit call back is performed after injecting the packets.
- // The goal of the Garnet_standalone protocol is only to inject packets into
- // the network, not to keep track of them via TBEs.
-
- transition(I, Response) {
- s_store_hit;
- c_issueResponse;
- m_popMandatoryQueue;
- }
-
- transition(I, Request) {
- r_load_hit;
- a_issueRequest;
- m_popMandatoryQueue;
- }
- transition(I, Forward) {
- r_load_hit;
- b_issueForward;
- m_popMandatoryQueue;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Authors: Brad Beckmann
- * Tushar Krishna
- */
-
-
-machine(MachineType:Directory, "Garnet_standalone Directory")
- : MessageBuffer * requestToDir, network="From", virtual_network="0",
- vnet_type = "request";
- MessageBuffer * forwardToDir, network="From", virtual_network="1",
- vnet_type = "forward";
- MessageBuffer * responseToDir, network="From", virtual_network="2",
- vnet_type = "response";
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, AccessPermission:Invalid, desc="Invalid";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- // processor requests
- Receive_Request, desc="Receive a request message";
- Receive_Forward, desc="Receive a forward message";
- Receive_Response, desc="Receive a response message";
- }
-
- // TYPES
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- }
-
- // ** FUNCTIONS **
- Tick clockEdge();
-
- State getState(Addr addr) {
- return State:I;
- }
-
- void setState(Addr addr, State state) {
-
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("Garnet_standalone does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("Garnet_standalone does not support functional write.");
- }
-
- // ** IN_PORTS **
-
- in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady(clockEdge())) {
- peek(requestQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:MSG) {
- trigger(Event:Receive_Request, in_msg.addr);
- } else {
- error("Invalid message");
- }
- }
- }
- }
- in_port(forwardQueue_in, RequestMsg, forwardToDir) {
- if (forwardQueue_in.isReady(clockEdge())) {
- peek(forwardQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:MSG) {
- trigger(Event:Receive_Forward, in_msg.addr);
- } else {
- error("Invalid message");
- }
- }
- }
- }
- in_port(responseQueue_in, RequestMsg, responseToDir) {
- if (responseQueue_in.isReady(clockEdge())) {
- peek(responseQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:MSG) {
- trigger(Event:Receive_Response, in_msg.addr);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue(clockEdge());
- }
-
- action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
- forwardQueue_in.dequeue(clockEdge());
- }
-
- action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
- responseQueue_in.dequeue(clockEdge());
- }
-
- // TRANSITIONS
-
- // The directory simply drops the received packets.
- // The goal of Garnet_standalone is only to track network stats.
-
- transition(I, Receive_Request) {
- i_popIncomingRequestQueue;
- }
- transition(I, Receive_Forward) {
- f_popIncomingForwardQueue;
- }
- transition(I, Receive_Response) {
- r_popIncomingResponseQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- MSG, desc="Message";
-}
-
-// RequestMsg (and also forwarded requests)
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (always MSG in this protocol)";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Multicast destination mask";
- DataBlock DataBlk, desc="data for the cache line";
- MessageSizeType MessageSize, desc="size category of the message";
-
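- // A single RequestMsg type is reused for requests, forwards, and
- // responses; only the virtual network and MessageSize differ.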
- bool functionalRead(Packet *pkt) {
- error("Garnet_standalone does not support functional accesses!");
- }
-
- bool functionalWrite(Packet *pkt) {
- error("Garnet_standalone does not support functional accesses!");
- }
-}
+++ /dev/null
-protocol "Garnet_standalone";
-include "RubySlicc_interfaces.slicc";
-include "Garnet_standalone-msg.sm";
-include "Garnet_standalone-cache.sm";
-include "Garnet_standalone-dir.sm";
+++ /dev/null
-/*
- * Copyright (c) 2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L0Cache, "MESI Directory L0 Cache")
- : Sequencer * sequencer;
- CacheMemory * Icache;
- CacheMemory * Dcache;
- Cycles request_latency := 2;
- Cycles response_latency := 2;
- bool send_evictions;
-
- // From this node's L0 cache to the network
- MessageBuffer * bufferToL1, network="To";
-
- // To this node's L0 cache FROM the network
- MessageBuffer * bufferFromL1, network="From";
-
- // Message queue between this controller and the processor
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L0Cache_State_I") {
- // Base states
-
- // The cache entry has not been allocated.
- I, AccessPermission:Invalid;
-
- // The cache entry is in shared mode. The processor can read this entry
- // but it cannot write to it.
- S, AccessPermission:Read_Only;
-
- // The cache entry is in exclusive mode. The processor can read this
- // entry. It can write to this entry without informing the directory.
- // On writing, the entry moves to M state.
- E, AccessPermission:Read_Only;
-
- // The processor has read and write permissions on this entry.
- M, AccessPermission:Read_Write;
-
- // Transient States
-
- // The cache controller has requested an instruction. It will be stored
- // in the shared state so that the processor can read it.
- Inst_IS, AccessPermission:Busy;
-
- // The cache controller has requested that this entry be fetched in
- // shared state so that the processor can read it.
- IS, AccessPermission:Busy;
-
- // The cache controller has requested that this entry be fetched in
- // modify state so that the processor can read/write it.
- IM, AccessPermission:Busy;
-
- // The cache controller had read permission for the entry, but the
- // processor now needs to write to it, so the controller has requested
- // write permission.
- SM, AccessPermission:Read_Only;
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // L0 events
- Load, desc="Load request from the home processor";
- Ifetch, desc="I-fetch request from the home processor";
- Store, desc="Store request from the home processor";
-
- Inv, desc="Invalidate request from L2 bank";
-
- // internal generated request
- L0_Replacement, desc="L0 Replacement", format="!r";
-
- // other requests
- Fwd_GETX, desc="GETX from other processor";
- Fwd_GETS, desc="GETS from other processor";
- Fwd_GET_INSTR, desc="GET_INSTR from other processor";
-
- Data, desc="Data for processor";
- Data_Exclusive, desc="Data for processor";
- Data_Stale, desc="Data for processor, but not for storage";
-
- Ack, desc="Ack for processor";
- Ack_all, desc="Last ack for processor";
-
- WB_Ack, desc="Ack for replacement";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="data is dirty";
- int pendingAcks, default="0", desc="number of pending acks";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Cycles ticksToCycles(Tick t);
- void set_cache_entry(AbstractCacheEntry a);
- void unset_cache_entry();
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpBuffers(Addr a);
- void wakeUpAllBuffers(Addr a);
- void profileMsgDelay(int virtualNetworkType, Cycles c);
-
- // inclusive cache returns L0 entries only
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
- if(is_valid(Dcache_entry)) {
- return Dcache_entry;
- }
-
- Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
- return Icache_entry;
- }
-
- Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
- return Dcache_entry;
- }
-
- Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
- return Icache_entry;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);
-
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);
-
- // MUST CHANGE
- if(is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
- return L0Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(cache_entry.CacheState));
- return L0Cache_State_to_permission(cache_entry.CacheState);
- }
-
- DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L0Cache_State_to_permission(state));
- }
- }
-
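- // Note: both ST and ATOMIC map to Event:Store; the L0 treats atomics as
- // ordinary writes.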
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
- return Event:Store;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- int getPendingAcks(TBE tbe) {
- return tbe.pendingAcks;
- }
-
- out_port(requestNetwork_out, CoherenceMsg, bufferToL1);
-
- // Messages for this L0 cache from the L1 cache
- in_port(messageBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
- if (messageBuffer_in.isReady(clockEdge())) {
- peek(messageBuffer_in, CoherenceMsg, block_on="addr") {
- assert(in_msg.Dest == machineID);
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
- } else if(in_msg.Class == CoherenceClass:DATA) {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- } else if(in_msg.Class == CoherenceClass:STALE_DATA) {
- trigger(Event:Data_Stale, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:ACK) {
- trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:INV) {
- trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:GETX ||
- in_msg.Class == CoherenceClass:UPGRADE) {
- // upgrade transforms to GETX due to race
- trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:GETS) {
- trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
- trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
- } else {
- error("Invalid forwarded request type");
- }
- }
- }
- }
-
- // Mandatory queue between the node's CPU and its L0 caches
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
- // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
-
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
- if (is_valid(Icache_entry)) {
- // The tag is present in the L0 I-cache, so trigger the ifetch event.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs[in_msg.LineAddress]);
- } else {
-
- // Check to see if it is in the OTHER L0
- Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
- if (is_valid(Dcache_entry)) {
- // The block is in the D-cache, i.e. the wrong L0 array; evict it first
- trigger(Event:L0_Replacement, in_msg.LineAddress,
- Dcache_entry, TBEs[in_msg.LineAddress]);
- }
-
- if (Icache.cacheAvail(in_msg.LineAddress)) {
- // The L0 doesn't have the line, but there is space for it,
- // so ask the L1 whether it has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs[in_msg.LineAddress]);
- } else {
- // No room in the L0, so we need to make room by evicting a line
- // Check if the line we want to evict is not locked
- Addr addr := Icache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
- trigger(Event:L0_Replacement, addr,
- getICacheEntry(addr),
- TBEs[addr]);
- }
- }
- } else {
-
- // *** DATA ACCESS ***
- Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
- if (is_valid(Dcache_entry)) {
- // The tag is present in the L0 D-cache, so trigger the request event.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs[in_msg.LineAddress]);
- } else {
-
- // Check to see if it is in the OTHER L0
- Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
- if (is_valid(Icache_entry)) {
- // The block is in the I-cache, i.e. the wrong L0 array; evict it first
- trigger(Event:L0_Replacement, in_msg.LineAddress,
- Icache_entry, TBEs[in_msg.LineAddress]);
- }
-
- if (Dcache.cacheAvail(in_msg.LineAddress)) {
- // The L0 doesn't have the line, but there is space for it,
- // so ask the L1 whether it has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs[in_msg.LineAddress]);
- } else {
- // No room in the L0, so we need to make room by evicting a line
- // Check if the line we want to evict is not locked
- Addr addr := Dcache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
- trigger(Event:L0_Replacement, addr,
- getDCacheEntry(addr),
- TBEs[addr]);
- }
- }
- }
- }
- }
- }
-
- // ACTIONS
- action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:GETS;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L1Cache, version);
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Dest);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:GETX;
- out_msg.Sender := machineID;
- DPRINTF(RubySlicc, "%s\n", machineID);
- out_msg.Dest := createMachineID(MachineType:L1Cache, version);
-
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Dest);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:UPGRADE;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L1Cache, version);
-
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Dest);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(f_sendDataToL1, "f", desc="send data to the L1 cache") {
- enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:INV_DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L1Cache, version);
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- cache_entry.Dirty := false;
- }
-
- action(fi_sendInvAck, "fi", desc="send inv ack to the L1 cache") {
- peek(messageBuffer_in, CoherenceMsg) {
- enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:INV_ACK;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L1Cache, version);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(g_issuePUTX, "g", desc="send PUTX to the L1 cache") {
- enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:PUTX;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender:= machineID;
- out_msg.Dest := createMachineID(MachineType:L1Cache, version);
-
- if (cache_entry.Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- out_msg.DataBlk := cache_entry.DataBlk;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk);
- cache_entry.Dirty := true;
- }
-
- action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk, true);
- cache_entry.Dirty := true;
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.Dirty := cache_entry.Dirty;
- tbe.DataBlk := cache_entry.DataBlk;
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
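- // l_popRequestQueue and o_popIncomingResponseQueue both drain bufferFromL1;
- // they differ only in which virtual network the measured delay is charged to.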
- action(l_popRequestQueue, "l",
- desc="Pop incoming request queue and profile the delay within this virtual network") {
- Tick delay := messageBuffer_in.dequeue(clockEdge());
- profileMsgDelay(2, ticksToCycles(delay));
- }
-
- action(o_popIncomingResponseQueue, "o",
- desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- Tick delay := messageBuffer_in.dequeue(clockEdge());
- profileMsgDelay(1, ticksToCycles(delay));
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(u_writeDataToCache, "u", desc="Write data to cache") {
- peek(messageBuffer_in, CoherenceMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(u_writeInstToCache, "ui", desc="Write instruction to cache") {
- peek(messageBuffer_in, CoherenceMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(ff_deallocateCacheBlock, "\f",
- desc="Deallocate L1 cache block.") {
- if (Dcache.isTagPresent(address)) {
- Dcache.deallocate(address);
- } else {
- Icache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(oo_allocateDCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(Dcache.allocate(address, new Entry));
- }
- }
-
- action(pp_allocateICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(Icache.allocate(address, new Entry));
- }
- }
-
- action(z_stallAndWaitMandatoryQueue, "\z", desc="stall and wait on the mandatory queue") {
- stall_and_wait(mandatoryQueue_in, address);
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpAllBuffers(address);
- }
-
- action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
- ++Icache.demand_misses;
- }
-
- action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
- ++Icache.demand_hits;
- }
-
- action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
- ++Dcache.demand_misses;
- }
-
- action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
- ++Dcache.demand_hits;
- }
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // Transitions for Load/Store/Replacement/WriteBack from transient states
- transition({Inst_IS, IS, IM, SM}, {Load, Ifetch, Store, L0_Replacement}) {
- z_stallAndWaitMandatoryQueue;
- }
-
- // Transitions from Idle
- transition(I, Load, IS) {
- oo_allocateDCacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, Ifetch, Inst_IS) {
- pp_allocateICacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileInstMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, Store, IM) {
- oo_allocateDCacheBlock;
- i_allocateTBE;
- b_issueGETX;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition({I, IS, IM, Inst_IS}, Inv) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(SM, Inv, IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Shared
- transition({S,E,M}, Load) {
- h_load_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- k_popMandatoryQueue;
- }
-
- transition(S, Store, SM) {
- i_allocateTBE;
- c_issueUPGRADE;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(S, L0_Replacement, I) {
- forward_eviction_to_cpu;
- ff_deallocateCacheBlock;
- }
-
- transition(S, Inv, I) {
- forward_eviction_to_cpu;
- fi_sendInvAck;
- ff_deallocateCacheBlock;
- l_popRequestQueue;
- }
-
- // Transitions from Exclusive
- transition({E,M}, Store, M) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(E, L0_Replacement, I) {
- forward_eviction_to_cpu;
- g_issuePUTX;
- ff_deallocateCacheBlock;
- }
-
- transition(E, {Inv, Fwd_GETX}, I) {
- // don't send data
- forward_eviction_to_cpu;
- fi_sendInvAck;
- ff_deallocateCacheBlock;
- l_popRequestQueue;
- }
-
- transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- f_sendDataToL1;
- l_popRequestQueue;
- }
-
- // Transitions from Modified
- transition(M, L0_Replacement, I) {
- forward_eviction_to_cpu;
- g_issuePUTX;
- ff_deallocateCacheBlock;
- }
-
- transition(M, {Inv, Fwd_GETX}, I) {
- forward_eviction_to_cpu;
- f_sendDataToL1;
- ff_deallocateCacheBlock;
- l_popRequestQueue;
- }
-
- transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- f_sendDataToL1;
- l_popRequestQueue;
- }
-
- transition(IS, Data, S) {
- u_writeDataToCache;
- hx_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Data_Exclusive, E) {
- u_writeDataToCache;
- hx_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Data_Stale, I) {
- u_writeDataToCache;
- hx_load_hit;
- s_deallocateTBE;
- ff_deallocateCacheBlock;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(Inst_IS, Data, S) {
- u_writeInstToCache;
- hx_ifetch_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(Inst_IS, Data_Exclusive, E) {
- u_writeInstToCache;
- hx_ifetch_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(Inst_IS, Data_Stale, I) {
- u_writeInstToCache;
- hx_ifetch_hit;
- s_deallocateTBE;
- ff_deallocateCacheBlock;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition({IM,SM}, Data_Exclusive, M) {
- u_writeDataToCache;
- hhx_store_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
- : CacheMemory * cache;
- int l2_select_num_bits;
- Cycles l1_request_latency := 2;
- Cycles l1_response_latency := 2;
- Cycles to_l2_latency := 1;
-
- // Message Buffers between the L1 and the L0 Cache
- // From the L1 cache to the L0 cache
- MessageBuffer * bufferToL0, network="To";
-
- // From the L0 cache to the L1 cache
- MessageBuffer * bufferFromL0, network="From";
-
- // Message queue from this L1 cache TO the network / L2
- MessageBuffer * requestToL2, network="To", virtual_network="0",
- vnet_type="request";
-
- MessageBuffer * responseToL2, network="To", virtual_network="1",
- vnet_type="response";
- MessageBuffer * unblockToL2, network="To", virtual_network="2",
- vnet_type="unblock";
-
- // To this L1 cache FROM the network / L2
- MessageBuffer * requestFromL2, network="From", virtual_network="2",
- vnet_type="request";
- MessageBuffer * responseFromL2, network="From", virtual_network="1",
- vnet_type="response";
-
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- I, AccessPermission:Invalid, desc="a L1 cache entry Idle";
- S, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
- SS, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
- E, AccessPermission:Read_Only, desc="a L1 cache entry Exclusive";
- EE, AccessPermission:Read_Write, desc="a L1 cache entry Exclusive";
- M, AccessPermission:Maybe_Stale, desc="a L1 cache entry Modified", format="!b";
- MM, AccessPermission:Read_Write, desc="a L1 cache entry Modified", format="!b";
-
- // Transient States
- IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
- IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
- SM, AccessPermission:Read_Only, desc="L1 idle, issued GETX, have not seen response yet";
- IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
- M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
- SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
-
- // For all of the following states, invalidate
- // message has been sent to L0 cache. The response
- // from the L0 cache has not been seen yet.
- S_IL0, AccessPermission:Busy;
- E_IL0, AccessPermission:Busy;
- M_IL0, AccessPermission:Busy;
- MM_IL0, AccessPermission:Read_Write;
- SM_IL0, AccessPermission:Busy;
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // Requests from the L0 cache
- Load, desc="Load request";
- Store, desc="Store request";
- WriteBack, desc="Writeback request";
-
- // Responses from the L0 Cache
- // L0 cache received the invalidation message
- // and has sent the data.
- L0_DataAck;
-
- Inv, desc="Invalidate request from L2 bank";
-
- // internal generated request
- // Invalidate the line in L0 due to own requirements
- L0_Invalidate_Own;
- // Invalidate the line in L0 due to some other cache's requirements
- L0_Invalidate_Else;
-    // Invalidate the line in the cache due to someone else's request or space needs.
- L1_Replacement;
-
- // other requests
- Fwd_GETX, desc="GETX from other processor";
- Fwd_GETS, desc="GETS from other processor";
-
- Data, desc="Data for processor";
- Data_Exclusive, desc="Data for processor";
- DataS_fromL1, desc="data for GETS request, need to unblock directory";
- Data_all_Acks, desc="Data for processor, all acks";
-
- L0_Ack, desc="Ack for processor";
- Ack, desc="Ack for processor";
- Ack_all, desc="Last ack for processor";
-
- WB_Ack, desc="Ack for replacement";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="data is dirty";
- int pendingAcks, default="0", desc="number of pending acks";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
-
- int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Cycles ticksToCycles(Tick t);
- void set_cache_entry(AbstractCacheEntry a);
- void unset_cache_entry();
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpBuffers(Addr a);
- void wakeUpAllBuffers(Addr a);
- void profileMsgDelay(int virtualNetworkType, Cycles c);
-
- // inclusive cache returns L1 entries only
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
- return cache_entry;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- // MUST CHANGE
- if(is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
- return L1Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
- return L1Cache_State_to_permission(cache_entry.CacheState);
- }
-
- DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L1Cache_State_to_permission(state));
- }
- }
-
- Event mandatory_request_type_to_event(CoherenceClass type) {
- if (type == CoherenceClass:GETS) {
- return Event:Load;
- } else if ((type == CoherenceClass:GETX) ||
- (type == CoherenceClass:UPGRADE)) {
- return Event:Store;
- } else if (type == CoherenceClass:PUTX) {
- return Event:WriteBack;
- } else {
- error("Invalid RequestType");
- }
- }
-
- int getPendingAcks(TBE tbe) {
- return tbe.pendingAcks;
- }
-
- bool inL0Cache(State state) {
- if (state == State:S || state == State:E || state == State:M ||
- state == State:S_IL0 || state == State:E_IL0 ||
- state == State:M_IL0 || state == State:SM_IL0) {
- return true;
- }
-
- return false;
- }
-
- out_port(requestNetwork_out, RequestMsg, requestToL2);
- out_port(responseNetwork_out, ResponseMsg, responseToL2);
- out_port(unblockNetwork_out, ResponseMsg, unblockToL2);
- out_port(bufferToL0_out, CoherenceMsg, bufferToL0);
-
- // Response From the L2 Cache to this L1 cache
- in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
- } else if(in_msg.Type == CoherenceResponseType:DATA) {
- if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
- getState(tbe, cache_entry, in_msg.addr) == State:IS_I) &&
- machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
-
- trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);
-
- } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
- } else {
- error("Invalid L1 response type");
- }
- }
- }
- }
-
- // Request to this L1 cache from the shared L2
- in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
- if(requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if (in_msg.Type == CoherenceRequestType:INV) {
- if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Else, in_msg.addr,
- cache_entry, tbe);
- } else {
- trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:GETX ||
- in_msg.Type == CoherenceRequestType:UPGRADE) {
- if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Else, in_msg.addr,
- cache_entry, tbe);
- } else {
- trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Else, in_msg.addr,
- cache_entry, tbe);
- } else {
- trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
- }
- } else {
- error("Invalid forwarded request type");
- }
- }
- }
- }
-
- // Requests to this L1 cache from the L0 cache.
- in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
- if (messageBufferFromL0_in.isReady(clockEdge())) {
- peek(messageBufferFromL0_in, CoherenceMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if(in_msg.Class == CoherenceClass:INV_DATA) {
- trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Class == CoherenceClass:INV_ACK) {
- trigger(Event:L0_Ack, in_msg.addr, cache_entry, tbe);
- } else {
- if (is_valid(cache_entry)) {
- trigger(mandatory_request_type_to_event(in_msg.Class),
- in_msg.addr, cache_entry, tbe);
- } else {
- if (cache.cacheAvail(in_msg.addr)) {
-              // L1 doesn't have the line, but we have space for it
-              // in the L1, so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Class),
- in_msg.addr, cache_entry, tbe);
- } else {
- // No room in the L1, so we need to make room in the L1
- Entry victim_entry :=
- getCacheEntry(cache.cacheProbe(in_msg.addr));
- TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
-
- if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
- trigger(Event:L0_Invalidate_Own,
- cache.cacheProbe(in_msg.addr),
- victim_entry, victim_tbe);
- } else {
- trigger(Event:L1_Replacement,
- cache.cacheProbe(in_msg.addr),
- victim_entry, victim_tbe);
- }
- }
- }
- }
- }
- }
- }
-
- // ACTIONS
- action(a_issueGETS, "a", desc="Issue GETS") {
- peek(messageBufferFromL0_in, CoherenceMsg) {
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- peek(messageBufferFromL0_in, CoherenceMsg) {
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- DPRINTF(RubySlicc, "%s\n", machineID);
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
-  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
- peek(messageBufferFromL0_in, CoherenceMsg) {
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:UPGRADE;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(d_sendDataToRequestor, "d", desc="send data to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
-  action(fi_sendInvAck, "fi", desc="send invalidate ack to the requestor") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- }
- }
- }
-
- action(forward_eviction_to_L0, "\cc", desc="sends eviction information to the processor") {
- enqueue(bufferToL0_out, CoherenceMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:INV;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L0Cache, version);
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
-
-  action(g_issuePUTX, "g", desc="issue PUTX writeback to the L2 cache") {
- enqueue(requestNetwork_out, RequestMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- if (cache_entry.Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- out_msg.DataBlk := cache_entry.DataBlk;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%#x\n", address);
- }
- }
-
-  action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, clusterID));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%#x\n", address);
-
- }
- }
-
- action(h_data_to_l0, "h", desc="If not prefetch, send data to the L0 cache.") {
- enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
-
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:DATA;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L0Cache, version);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
-  action(hh_xdata_to_l0, "\h", desc="send exclusive data to the L0 cache.") {
- enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
-
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L0Cache, version);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- //cache_entry.Dirty := true;
- }
- }
-
-  action(h_stale_data_to_l0, "hs", desc="send stale data to the L0 cache.") {
- enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
-
- out_msg.addr := address;
- out_msg.Class := CoherenceClass:STALE_DATA;
- out_msg.Sender := machineID;
- out_msg.Dest := createMachineID(MachineType:L0Cache, version);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.Dirty := cache_entry.Dirty;
- tbe.DataBlk := cache_entry.DataBlk;
- }
-
-  action(k_popL0RequestQueue, "k", desc="Pop the L0 request queue.") {
- messageBufferFromL0_in.dequeue(clockEdge());
- }
-
- action(l_popL2RequestQueue, "l",
- desc="Pop incoming request queue and profile the delay within this virtual network") {
- Tick delay := requestNetwork_in.dequeue(clockEdge());
- profileMsgDelay(2, ticksToCycles(delay));
- }
-
- action(o_popL2ResponseQueue, "o",
- desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- Tick delay := responseNetwork_in.dequeue(clockEdge());
- profileMsgDelay(1, ticksToCycles(delay));
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(u_writeDataFromL0Request, "ureql0", desc="Write data to cache") {
- peek(messageBufferFromL0_in, CoherenceMsg) {
- assert(is_valid(cache_entry));
- if (in_msg.Dirty) {
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(u_writeDataFromL2Response, "uresl2", desc="Write data to cache") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(u_writeDataFromL0Response, "uresl0", desc="Write data to cache") {
- peek(messageBufferFromL0_in, CoherenceMsg) {
- assert(is_valid(cache_entry));
- if (in_msg.Dirty) {
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(q_updateAckCount, "q", desc="Update ack count") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
- }
- }
-
- action(ff_deallocateCacheBlock, "\f",
- desc="Deallocate L1 cache block.") {
- if (cache.isTagPresent(address)) {
- cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(oo_allocateCacheBlock, "\o", desc="Set cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(cache.allocate(address, new Entry));
- }
- }
-
- action(z0_stallAndWaitL0Queue, "\z0", desc="recycle L0 request queue") {
- stall_and_wait(messageBufferFromL0_in, address);
- }
-
- action(z2_stallAndWaitL2Queue, "\z2", desc="recycle L2 request queue") {
- stall_and_wait(requestNetwork_in, address);
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpAllBuffers(address);
- }
-
- action(uu_profileMiss, "\um", desc="Profile the demand miss") {
- ++cache.demand_misses;
- }
-
- action(uu_profileHit, "\uh", desc="Profile the demand hit") {
- ++cache.demand_hits;
- }
-
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // Transitions for Load/Store/Replacement/WriteBack from transient states
- transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, S_IL0, M_IL0, E_IL0, MM_IL0},
- {Load, Store, L1_Replacement}) {
- z0_stallAndWaitL0Queue;
- }
-
- transition(I, Load, IS) {
- oo_allocateCacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileMiss;
- k_popL0RequestQueue;
- }
-
- transition(I, Store, IM) {
- oo_allocateCacheBlock;
- i_allocateTBE;
- b_issueGETX;
- uu_profileMiss;
- k_popL0RequestQueue;
- }
-
- transition(I, Inv) {
- fi_sendInvAck;
- l_popL2RequestQueue;
- }
-
- // Transitions from Shared
- transition({S,SS}, Load, S) {
- h_data_to_l0;
- uu_profileHit;
- k_popL0RequestQueue;
- }
-
- transition(EE, Load, E) {
- hh_xdata_to_l0;
- uu_profileHit;
- k_popL0RequestQueue;
- }
-
- transition(MM, Load, M) {
- hh_xdata_to_l0;
- uu_profileHit;
- k_popL0RequestQueue;
- }
-
- transition({S,SS}, Store, SM) {
- i_allocateTBE;
- c_issueUPGRADE;
- uu_profileMiss;
- k_popL0RequestQueue;
- }
-
- transition(SS, L1_Replacement, I) {
- ff_deallocateCacheBlock;
- }
-
- transition(S, {L0_Invalidate_Own, L0_Invalidate_Else}, S_IL0) {
- forward_eviction_to_L0;
- }
-
- transition(SS, Inv, I) {
- fi_sendInvAck;
- ff_deallocateCacheBlock;
- l_popL2RequestQueue;
- }
-
- // Transitions from Exclusive
-
- transition({EE,MM}, Store, M) {
- hh_xdata_to_l0;
- uu_profileHit;
- k_popL0RequestQueue;
- }
-
- transition(EE, L1_Replacement, M_I) {
- // silent E replacement??
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateCacheBlock;
- }
-
- transition(EE, Inv, I) {
- // don't send data
- fi_sendInvAck;
- ff_deallocateCacheBlock;
- l_popL2RequestQueue;
- }
-
- transition(EE, Fwd_GETX, I) {
- d_sendDataToRequestor;
- ff_deallocateCacheBlock;
- l_popL2RequestQueue;
- }
-
- transition(EE, Fwd_GETS, SS) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popL2RequestQueue;
- }
-
- transition(E, {L0_Invalidate_Own, L0_Invalidate_Else}, E_IL0) {
- forward_eviction_to_L0;
- }
-
- // Transitions from Modified
- transition(MM, L1_Replacement, M_I) {
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateCacheBlock;
- }
-
- transition({M,E}, WriteBack, MM) {
- u_writeDataFromL0Request;
- k_popL0RequestQueue;
- }
-
- transition(M_I, WB_Ack, I) {
- s_deallocateTBE;
- o_popL2ResponseQueue;
- ff_deallocateCacheBlock;
- kd_wakeUpDependents;
- }
-
- transition(MM, Inv, I) {
- f_sendDataToL2;
- ff_deallocateCacheBlock;
- l_popL2RequestQueue;
- }
-
- transition(M_I, Inv, SINK_WB_ACK) {
- ft_sendDataToL2_fromTBE;
- l_popL2RequestQueue;
- }
-
- transition(MM, Fwd_GETX, I) {
- d_sendDataToRequestor;
- ff_deallocateCacheBlock;
- l_popL2RequestQueue;
- }
-
- transition(MM, Fwd_GETS, SS) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popL2RequestQueue;
- }
-
- transition(M, {L0_Invalidate_Own, L0_Invalidate_Else}, M_IL0) {
- forward_eviction_to_L0;
- }
-
- transition(M_I, Fwd_GETX, SINK_WB_ACK) {
- dt_sendDataToRequestor_fromTBE;
- l_popL2RequestQueue;
- }
-
- transition(M_I, Fwd_GETS, SINK_WB_ACK) {
- dt_sendDataToRequestor_fromTBE;
- d2t_sendDataToL2_fromTBE;
- l_popL2RequestQueue;
- }
-
- // Transitions from IS
- transition({IS,IS_I}, Inv, IS_I) {
- fi_sendInvAck;
- l_popL2RequestQueue;
- }
-
- transition(IS, Data_all_Acks, S) {
- u_writeDataFromL2Response;
- h_data_to_l0;
- s_deallocateTBE;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS_I, Data_all_Acks, I) {
- u_writeDataFromL2Response;
- h_stale_data_to_l0;
- s_deallocateTBE;
- ff_deallocateCacheBlock;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, DataS_fromL1, S) {
- u_writeDataFromL2Response;
- j_sendUnblock;
- h_data_to_l0;
- s_deallocateTBE;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS_I, DataS_fromL1, I) {
- u_writeDataFromL2Response;
- j_sendUnblock;
- h_stale_data_to_l0;
- s_deallocateTBE;
- ff_deallocateCacheBlock;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- // directory is blocked when sending exclusive data
- transition({IS,IS_I}, Data_Exclusive, E) {
- u_writeDataFromL2Response;
- hh_xdata_to_l0;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from IM
- transition({IM,SM}, Inv, IM) {
- fi_sendInvAck;
- l_popL2RequestQueue;
- }
-
- transition(IM, Data, SM) {
- u_writeDataFromL2Response;
- q_updateAckCount;
- o_popL2ResponseQueue;
- }
-
- transition(IM, Data_all_Acks, M) {
- u_writeDataFromL2Response;
- hh_xdata_to_l0;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition({SM, IM}, Ack) {
- q_updateAckCount;
- o_popL2ResponseQueue;
- }
-
- transition(SM, Ack_all, M) {
- jj_sendExclusiveUnblock;
- hh_xdata_to_l0;
- s_deallocateTBE;
- o_popL2ResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(SM, L0_Invalidate_Else, SM_IL0) {
- forward_eviction_to_L0;
- }
-
- transition(SINK_WB_ACK, Inv){
- fi_sendInvAck;
- l_popL2RequestQueue;
- }
-
- transition(SINK_WB_ACK, WB_Ack, I){
- s_deallocateTBE;
- o_popL2ResponseQueue;
- ff_deallocateCacheBlock;
- kd_wakeUpDependents;
- }
-
- transition({M_IL0, E_IL0}, WriteBack, MM_IL0) {
- u_writeDataFromL0Request;
- k_popL0RequestQueue;
- kd_wakeUpDependents;
- }
-
- transition({M_IL0, E_IL0}, L0_DataAck, MM) {
- u_writeDataFromL0Response;
- k_popL0RequestQueue;
- kd_wakeUpDependents;
- }
-
- transition({M_IL0, MM_IL0}, L0_Ack, MM) {
- k_popL0RequestQueue;
- kd_wakeUpDependents;
- }
-
- transition(E_IL0, L0_Ack, EE) {
- k_popL0RequestQueue;
- kd_wakeUpDependents;
- }
-
- transition(S_IL0, L0_Ack, SS) {
- k_popL0RequestQueue;
- kd_wakeUpDependents;
- }
-
- transition(SM_IL0, L0_Ack, IM) {
- k_popL0RequestQueue;
- kd_wakeUpDependents;
- }
-
- transition({S_IL0, M_IL0, E_IL0, SM_IL0, SM}, L0_Invalidate_Own) {
- z0_stallAndWaitL0Queue;
- }
-
- transition({S_IL0, M_IL0, E_IL0, SM_IL0}, L0_Invalidate_Else) {
- z2_stallAndWaitL2Queue;
- }
-
- transition({S_IL0, M_IL0, E_IL0, MM_IL0}, {Inv, Fwd_GETX, Fwd_GETS}) {
- z2_stallAndWaitL2Queue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// Various classes of messages that can be exchanged between the L0 and the L1
-// controllers.
-enumeration(CoherenceClass, desc="...") {
- GETX, desc="Get eXclusive";
- UPGRADE, desc="UPGRADE to exclusive";
- GETS, desc="Get Shared";
- GET_INSTR, desc="Get Instruction";
- INV, desc="INValidate";
- PUTX, desc="Replacement message";
-
- WB_ACK, desc="Writeback ack";
-
- // Request types for sending data and acks from L0 to L1 cache
- // when an invalidation message is received.
- INV_DATA;
- INV_ACK;
-
- DATA, desc="Data block for L1 cache in S state";
- DATA_EXCLUSIVE, desc="Data block for L1 cache in M/E state";
- ACK, desc="Generic invalidate ack";
-
- // This is a special case in which the L1 cache lost permissions to the
- // shared block before it got the data. So the L0 cache can use the data
- // but not store it.
- STALE_DATA;
-}
-
-// Class for messages sent between the L0 and the L1 controllers.
-structure(CoherenceMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address of the cache block";
- CoherenceClass Class, desc="Type of message (GetS, GetX, PutX, etc)";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- MachineID Sender, desc="What component sent this message";
- MachineID Dest, desc="What machine receives this message";
- MessageSizeType MessageSize, desc="size category of the message";
- DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
- bool Dirty, default="false", desc="Dirty bit";
-
- bool functionalRead(Packet *pkt) {
-    // Only PUTX messages contain the data block
- if (Class == CoherenceClass:PUTX) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
-    // No check on the message class is required, since a functional
-    // write should update the data in any message that carries the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
+++ /dev/null
-protocol "MESI_Three_Level";
-include "RubySlicc_interfaces.slicc";
-include "MESI_Two_Level-msg.sm";
-include "MESI_Three_Level-msg.sm";
-include "MESI_Three_Level-L0cache.sm";
-include "MESI_Three_Level-L1cache.sm";
-include "MESI_Two_Level-L2cache.sm";
-include "MESI_Two_Level-dir.sm";
-include "MESI_Two_Level-dma.sm";
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
- : Sequencer * sequencer;
- CacheMemory * L1Icache;
- CacheMemory * L1Dcache;
- Prefetcher * prefetcher;
- int l2_select_num_bits;
- Cycles l1_request_latency := 2;
- Cycles l1_response_latency := 2;
- Cycles to_l2_latency := 1;
- bool send_evictions;
- bool enable_prefetch := "False";
-
- // Message Queues
- // From this node's L1 cache TO the network
-
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
- vnet_type="request";
-
- // a local L1 -> this L2 bank
- MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
- vnet_type="response";
-
- MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
- vnet_type="unblock";
-
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
- vnet_type="request";
-
- // a L2 bank -> this L1
- MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
- vnet_type="response";
-
- // Request Buffer for prefetches
- MessageBuffer * optionalQueue;
-
- // Buffer for requests generated by the processor core.
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- NP, AccessPermission:Invalid, desc="Not present in either cache";
- I, AccessPermission:Invalid, desc="a L1 cache entry Idle";
- S, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
- E, AccessPermission:Read_Only, desc="a L1 cache entry Exclusive";
- M, AccessPermission:Read_Write, desc="a L1 cache entry Modified", format="!b";
-
- // Transient States
- IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
- IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
- SM, AccessPermission:Read_Only, desc="L1 idle, issued GETX, have not seen response yet";
- IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
-
- M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
- SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
-
- // Transient States in which block is being prefetched
- PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
- PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
- PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
- PF_IS_I, AccessPermission:Busy, desc="Issued GETs, saw inv before data";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // L1 events
- Load, desc="Load request from the home processor";
- Ifetch, desc="I-fetch request from the home processor";
- Store, desc="Store request from the home processor";
-
- Inv, desc="Invalidate request from L2 bank";
-
- // internal generated request
- L1_Replacement, desc="L1 Replacement", format="!r";
- PF_L1_Replacement, desc="Prefetch L1 Replacement", format="!pr";
-
- // other requests
- Fwd_GETX, desc="GETX from other processor";
- Fwd_GETS, desc="GETS from other processor";
- Fwd_GET_INSTR, desc="GET_INSTR from other processor";
-
- Data, desc="Data for processor";
- Data_Exclusive, desc="Data for processor";
- DataS_fromL1, desc="data for GETS request, need to unblock directory";
- Data_all_Acks, desc="Data for processor, all acks";
-
- Ack, desc="Ack for processor";
- Ack_all, desc="Last ack for processor";
-
- WB_Ack, desc="Ack for replacement";
-
- PF_Load, desc="load request from prefetcher";
- PF_Ifetch, desc="instruction fetch request from prefetcher";
- PF_Store, desc="exclusive load request from prefetcher";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
- State CacheState, desc="cache state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
- bool isPrefetch, desc="Set if this block was prefetched and not yet accessed";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="data is dirty";
- bool isPrefetch, desc="Set if this was caused by a prefetch";
- int pendingAcks, default="0", desc="number of pending acks";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
-
- int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Cycles ticksToCycles(Tick t);
- void set_cache_entry(AbstractCacheEntry a);
- void unset_cache_entry();
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpBuffers(Addr a);
- void profileMsgDelay(int virtualNetworkType, Cycles c);
-
- // inclusive cache returns L1 entries only
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
- if(is_valid(L1Dcache_entry)) {
- return L1Dcache_entry;
- }
-
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
- return L1Icache_entry;
- }
-
- Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
- return L1Dcache_entry;
- }
-
- Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
- return L1Icache_entry;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
-
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:NP;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
-
- // MUST CHANGE
- if(is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
- return L1Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
- return L1Cache_State_to_permission(cache_entry.CacheState);
- }
-
- DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L1Cache_State_to_permission(state));
- }
- }
-
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
- return Event:Store;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- Event prefetch_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:PF_Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:PF_Ifetch;
- } else if ((type == RubyRequestType:ST) ||
- (type == RubyRequestType:ATOMIC)) {
- return Event:PF_Store;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- int getPendingAcks(TBE tbe) {
- return tbe.pendingAcks;
- }
-
- out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
- out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
- out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
- out_port(optionalQueue_out, RubyRequest, optionalQueue);
-
-
- // Prefetch queue between the controller and the prefetcher
- // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
- // implemented as a LIFO structure. The structure would allow for fast
- // searches of all entries in the queue, not just the head msg. All
- // msgs in the structure can be invalidated if a demand miss matches.
- in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
- if (optionalQueue_in.isReady(clockEdge())) {
- peek(optionalQueue_in, RubyRequest) {
- // Instruction Prefetch
- if (in_msg.Type == RubyRequestType:IFETCH) {
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The block to be prefetched is already present in the
- // cache. We should drop this request.
- trigger(prefetch_request_type_to_event(in_msg.Type),
- in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- }
-
- // Check to see if it is in the OTHER L1
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
- // The block is in the wrong L1 cache. We should drop
- // this request.
- trigger(prefetch_request_type_to_event(in_msg.Type),
- in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
- }
-
- if (L1Icache.cacheAvail(in_msg.LineAddress)) {
-            // L1 doesn't have the line, but we have space for it
-            // in the L1, so let's see if the L2 has it
- trigger(prefetch_request_type_to_event(in_msg.Type),
- in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- } else {
- // No room in the L1, so we need to make room in the L1
- trigger(Event:PF_L1_Replacement,
- L1Icache.cacheProbe(in_msg.LineAddress),
- getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
- }
- } else {
- // Data prefetch
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
- // The block to be prefetched is already present in the
- // cache. We should drop this request.
- trigger(prefetch_request_type_to_event(in_msg.Type),
- in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
- }
-
- // Check to see if it is in the OTHER L1
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The block is in the wrong L1. Just drop the prefetch
- // request.
- trigger(prefetch_request_type_to_event(in_msg.Type),
- in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- }
-
- if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
-            // L1 doesn't have the line, but we have space for it in
-            // the L1, so let's see if the L2 has it
- trigger(prefetch_request_type_to_event(in_msg.Type),
- in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
- } else {
- // No room in the L1, so we need to make room in the L1
- trigger(Event:PF_L1_Replacement,
- L1Dcache.cacheProbe(in_msg.LineAddress),
- getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
- }
- }
- }
- }
- }
-
- // Response L1 Network - response msg to this L1 cache
- in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
- if (responseL1Network_in.isReady(clockEdge())) {
- peek(responseL1Network_in, ResponseMsg, block_on="addr") {
- assert(in_msg.Destination.isElement(machineID));
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
- } else if(in_msg.Type == CoherenceResponseType:DATA) {
- if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
- getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
- getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
- getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
- machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
-
- trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);
-
- } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
- trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
- } else {
- error("Invalid L1 response type");
- }
- }
- }
- }
-
- // Request InterChip network - request from this L1 cache to the shared L2
- in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
- if(requestL1Network_in.isReady(clockEdge())) {
- peek(requestL1Network_in, RequestMsg, block_on="addr") {
- assert(in_msg.Destination.isElement(machineID));
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if (in_msg.Type == CoherenceRequestType:INV) {
- trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:GETX ||
- in_msg.Type == CoherenceRequestType:UPGRADE) {
- // upgrade transforms to GETX due to race
- trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
- trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
- } else {
- error("Invalid forwarded request type");
- }
- }
- }
- }
-
-  // Mandatory queue between the node's CPU and its L1 caches
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
-      // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
-
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The tag matches for the L1, so the L1 asks the L2 for it.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- } else {
-
- // Check to see if it is in the OTHER L1
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
-            // The block is in the wrong L1; put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
- }
-
- if (L1Icache.cacheAvail(in_msg.LineAddress)) {
-              // L1 doesn't have the line, but we have space for it
-              // in the L1, so let's see if the L2 has it.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- } else {
- // No room in the L1, so we need to make room in the L1
-
- // Check if the line we want to evict is not locked
- Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
-
- trigger(Event:L1_Replacement, addr,
- getL1ICacheEntry(addr),
- TBEs[addr]);
- }
- }
- } else {
-
- // *** DATA ACCESS ***
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
-          // The tag matches for the L1, so the L1 asks the L2 for it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
- } else {
-
- // Check to see if it is in the OTHER L1
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The block is in the wrong L1, so evict it from that cache first
- trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- }
-
- if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
- // L1 doesn't have the line, but we have space for it
- // in the L1, so let's see if the L2 has it.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
- } else {
- // No room in the L1, so we need to make room in the L1
-
- // Check if the line we want to evict is not locked
- Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
-
- trigger(Event:L1_Replacement, addr,
- getL1DCacheEntry(addr),
- TBEs[addr]);
- }
- }
- }
- }
- }
- }
-
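The mandatory-queue port above encodes the L1's demand-path decision tree: a tag hit in the correct L1 triggers the demand event, a hit in the peer L1 forces a replacement there first, a miss with a free way allocates and queries the L2, and a full set evicts the victim returned by `cacheProbe`. A minimal C++ sketch of that decision, using a hypothetical `CacheArray` stand-in rather than the SLICC-generated code:

```cpp
#include <cstdint>
#include <unordered_set>

// Hypothetical stand-in for the SLICC CacheMemory interface.
struct CacheArray {
    std::unordered_set<uint64_t> lines;  // resident line addresses
    uint64_t victim = 0;                 // what cacheProbe would evict
    bool setFull = false;                // no free way for the new line

    bool isTagPresent(uint64_t a) const { return lines.count(a) != 0; }
    bool cacheAvail(uint64_t) const { return !setFull; }
    uint64_t cacheProbe(uint64_t) const { return victim; }
};

enum class L1Action { DemandAccess, ReplacePeer, AllocateAndFetch, EvictVictim };

// Mirrors one arm of the mandatory-queue port: 'own' is the I-cache for
// ifetches (the D-cache for loads/stores) and 'peer' is the other L1.
L1Action classify(const CacheArray& own, const CacheArray& peer,
                  uint64_t line, uint64_t& victim)
{
    if (own.isTagPresent(line))
        return L1Action::DemandAccess;       // hit: trigger the demand event
    if (peer.isTagPresent(line))
        return L1Action::ReplacePeer;        // block cached in the wrong L1
    if (own.cacheAvail(line))
        return L1Action::AllocateAndFetch;   // miss, but a way is free
    victim = own.cacheProbe(line);           // must make room first
    return L1Action::EvictVictim;
}
```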
- void enqueuePrefetch(Addr address, RubyRequestType type) {
- enqueue(optionalQueue_out, RubyRequest, 1) {
- out_msg.LineAddress := address;
- out_msg.Type := type;
- out_msg.AccessMode := RubyAccessMode:Supervisor;
- }
- }
-
- // ACTIONS
- action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
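a_issueGETS above and its sibling issue actions below all route through `mapAddressToRange`, which picks the destination L2 bank from a bit-field of the line address. A small sketch of that interleaving, assuming the bank index is simply bits [l2_select_low_bit, l2_select_low_bit + l2_select_num_bits) of the address:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of address-interleaved L2 bank selection; the parameter names
// mirror l2_select_low_bit / l2_select_num_bits used in the actions.
int selectL2Bank(uint64_t addr, int low_bit, int num_bits)
{
    return static_cast<int>((addr >> low_bit) & ((1ULL << num_bits) - 1));
}

int main()
{
    // With 64 B lines (low_bit = 6) and 4 banks (num_bits = 2),
    // consecutive cache lines rotate round-robin across the banks.
    assert(selectL2Bank(0x1000, 6, 2) == 0);
    assert(selectL2Bank(0x1040, 6, 2) == 1);
    assert(selectL2Bank(0x1080, 6, 2) == 2);
    return 0;
}
```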
- action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
- peek(optionalQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GET_INSTR;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(pai_issuePfGETINSTR, "pai",
- desc="Issue GETINSTR for prefetch request") {
- peek(optionalQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GET_INSTR;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(
- mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
-
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- }
- }
- }
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- DPRINTF(RubySlicc, "%s\n", machineID);
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
- peek(optionalQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- DPRINTF(RubySlicc, "%s\n", machineID);
-
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
-
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:UPGRADE;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
- address, out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Prefetch := in_msg.Prefetch;
- out_msg.AccessMode := in_msg.AccessMode;
- }
- }
- }
-
- action(d_sendDataToRequestor, "d", desc="send data to requestor") {
- peek(requestL1Network_in, RequestMsg) {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
- peek(requestL1Network_in, RequestMsg) {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
- peek(requestL1Network_in, RequestMsg) {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(fi_sendInvAck, "fi", desc="send invalidation ack to the requestor") {
- peek(requestL1Network_in, RequestMsg) {
- enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.AckCount := 1;
- }
- }
- }
-
- action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(g_issuePUTX, "g", desc="issue PUTX writeback to the L2 cache") {
- enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Requestor:= machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- if (cache_entry.Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
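Note how g_issuePUTX always copies the data block into the message but only charges the network for a data-sized message when the line is dirty; a clean PUTX travels as a control-sized message. A tiny sketch of that choice (the byte costs below are illustrative assumptions, not gem5's actual MessageSizeType values):

```cpp
// Illustrative message costs for the dirty/clean PUTX sizing decision.
struct CacheLine { bool dirty; };

int putxMessageBytes(const CacheLine& line)
{
    const int kControlBytes = 8;   // Writeback_Control: header only
    const int kDataBytes    = 72;  // Writeback_Data: header + 64 B payload
    return line.dirty ? kDataBytes : kControlBytes;
}
```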
- action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%#x\n", address);
- }
- }
-
- action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
- enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%#x\n", address);
- }
- }
-
- action(dg_invalidate_sc, "dg",
- desc="Invalidate store conditional as the cache lost permissions") {
- sequencer.invalidateSC(address);
- }
-
- action(h_load_hit, "hd",
- desc="Notify sequencer the load completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk);
- cache_entry.Dirty := true;
- }
-
- action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.writeCallback(address, cache_entry.DataBlk, true);
- cache_entry.Dirty := true;
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.isPrefetch := false;
- tbe.Dirty := cache_entry.Dirty;
- tbe.DataBlk := cache_entry.DataBlk;
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(l_popRequestQueue, "l",
- desc="Pop incoming request queue and profile the delay within this virtual network") {
- Tick delay := requestL1Network_in.dequeue(clockEdge());
- profileMsgDelay(2, ticksToCycles(delay));
- }
-
- action(o_popIncomingResponseQueue, "o",
- desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- Tick delay := responseL1Network_in.dequeue(clockEdge());
- profileMsgDelay(1, ticksToCycles(delay));
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
- peek(responseL1Network_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(q_updateAckCount, "q", desc="Update ack count") {
- peek(responseL1Network_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
- }
- }
-
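q_updateAckCount and the response-port dispatch cooperate on a single invariant: `tbe.pendingAcks` minus the running sum of received AckCounts reaches zero exactly when every invalidation ack has been collected, which is when the Data_all_Acks / Ack_all events fire. A sketch of that bookkeeping:

```cpp
// Sketch of the pendingAcks invariant behind q_updateAckCount: each
// incoming response subtracts its AckCount, and the all-acks events
// fire when the counter hits zero. AckCount may be negative (the L2
// uses a negative count on data to say how many acks are still owed).
struct TbeSketch { int pendingAcks = 0; };

bool applyAck(TbeSketch& tbe, int ackCount)
{
    tbe.pendingAcks -= ackCount;
    return tbe.pendingAcks == 0;  // true -> Ack_all / Data_all_Acks
}
```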
- action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- if (L1Dcache.isTagPresent(address)) {
- L1Dcache.deallocate(address);
- } else {
- L1Icache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1Dcache.allocate(address, new Entry));
- }
- }
-
- action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1Icache.allocate(address, new Entry));
- }
- }
-
- action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
- stall_and_wait(mandatoryQueue_in, address);
- }
-
- action(z_stallAndWaitOptionalQueue, "\pz", desc="Stall and wait the L1 prefetch request queue") {
- stall_and_wait(optionalQueue_in, address);
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpBuffers(address);
- }
-
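z_stallAndWaitMandatoryQueue and kd_wakeUpDependents form a matched pair: stalling parks the head message on a per-address wait list instead of spinning on it, and the wake-up re-injects every parked message once the transient state resolves. A sketch of that mechanism, assuming a placeholder Msg type:

```cpp
#include <cstdint>
#include <deque>
#include <string>
#include <unordered_map>

using Msg = std::string;  // placeholder for a queued RubyRequest

// Sketch of the stall_and_wait / wakeUpBuffers pairing used above.
struct WaitBuffers {
    std::unordered_map<uint64_t, std::deque<Msg>> parked;

    // z_stallAndWaitMandatoryQueue: park the head message on 'addr'.
    void stallAndWait(uint64_t addr, Msg m)
    { parked[addr].push_back(std::move(m)); }

    // kd_wakeUpDependents: release everything waiting on 'addr' so the
    // in_port sees those messages again on the next wakeup.
    std::deque<Msg> wakeUpDependents(uint64_t addr)
    {
        auto it = parked.find(addr);
        if (it == parked.end())
            return {};
        std::deque<Msg> msgs = std::move(it->second);
        parked.erase(it);
        return msgs;
    }
};
```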
- action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
- ++L1Icache.demand_misses;
- }
-
- action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
- ++L1Icache.demand_hits;
- }
-
- action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
- ++L1Dcache.demand_misses;
- }
-
- action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
- ++L1Dcache.demand_hits;
- }
-
- action(po_observeHit, "\ph", desc="Inform the prefetcher about the hit") {
- peek(mandatoryQueue_in, RubyRequest) {
- if (cache_entry.isPrefetch) {
- prefetcher.observePfHit(in_msg.LineAddress);
- cache_entry.isPrefetch := false;
- }
- }
- }
-
- action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- if (enable_prefetch) {
- prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
- }
- }
- }
-
- action(ppm_observePfMiss, "\ppm",
- desc="Inform the prefetcher about the partial miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- prefetcher.observePfMiss(in_msg.LineAddress);
- }
- }
-
- action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
- optionalQueue_in.dequeue(clockEdge());
- }
-
- action(mp_markPrefetched, "mp", desc="Set the isPrefetch flag") {
- assert(is_valid(cache_entry));
- cache_entry.isPrefetch := true;
- }
-
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // Transitions for Load/Store/Replacement/WriteBack from transient states
- transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
- z_stallAndWaitMandatoryQueue;
- }
-
- transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
- z_stallAndWaitMandatoryQueue;
- }
-
- transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
- z_stallAndWaitMandatoryQueue;
- }
-
- transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, PF_IS, PF_IS_I, PF_IM, PF_SM}, PF_L1_Replacement) {
- z_stallAndWaitOptionalQueue;
- }
-
- // Transitions from Idle
- transition({NP,I}, {L1_Replacement, PF_L1_Replacement}) {
- ff_deallocateL1CacheBlock;
- }
-
- transition({S,E,M,IS,IM,SM,IS_I,PF_IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
- {PF_Load, PF_Store, PF_Ifetch}) {
- pq_popPrefetchQueue;
- }
-
- transition({NP,I}, Load, IS) {
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileDataMiss;
- po_observeMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, PF_Load, PF_IS) {
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- pa_issuePfGETS;
- pq_popPrefetchQueue;
- }
-
- transition(PF_IS, Load, IS) {
- uu_profileDataMiss;
- ppm_observePfMiss;
- k_popMandatoryQueue;
- }
-
- transition(PF_IS_I, Load, IS_I) {
- uu_profileDataMiss;
- ppm_observePfMiss;
- k_popMandatoryQueue;
- }
-
- transition(PF_IS_I, Ifetch, IS_I) {
- uu_profileInstMiss;
- ppm_observePfMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, Ifetch, IS) {
- pp_allocateL1ICacheBlock;
- i_allocateTBE;
- ai_issueGETINSTR;
- uu_profileInstMiss;
- po_observeMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, PF_Ifetch, PF_IS) {
- pp_allocateL1ICacheBlock;
- i_allocateTBE;
- pai_issuePfGETINSTR;
- pq_popPrefetchQueue;
- }
-
- // We proactively assume that the prefetched block
- // belongs in the instruction cache
- transition(PF_IS, Ifetch, IS) {
- uu_profileDataMiss;
- ppm_observePfMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, Store, IM) {
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- b_issueGETX;
- uu_profileDataMiss;
- po_observeMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP,I}, PF_Store, PF_IM) {
- oo_allocateL1DCacheBlock;
- i_allocateTBE;
- pb_issuePfGETX;
- pq_popPrefetchQueue;
- }
-
- transition(PF_IM, Store, IM) {
- uu_profileDataMiss;
- ppm_observePfMiss;
- k_popMandatoryQueue;
- }
-
- transition(PF_SM, Store, SM) {
- uu_profileDataMiss;
- ppm_observePfMiss;
- k_popMandatoryQueue;
- }
-
- transition({NP, I}, Inv) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Shared
- transition({S,E,M}, Load) {
- h_load_hit;
- uu_profileDataHit;
- po_observeHit;
- k_popMandatoryQueue;
- }
-
- transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- po_observeHit;
- k_popMandatoryQueue;
- }
-
- transition(S, Store, SM) {
- i_allocateTBE;
- c_issueUPGRADE;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(S, {L1_Replacement, PF_L1_Replacement}, I) {
- forward_eviction_to_cpu;
- ff_deallocateL1CacheBlock;
- }
-
- transition(S, Inv, I) {
- forward_eviction_to_cpu;
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- // Transitions from Exclusive
-
- transition({E,M}, Store, M) {
- hh_store_hit;
- uu_profileDataHit;
- po_observeHit;
- k_popMandatoryQueue;
- }
-
- transition(E, {L1_Replacement, PF_L1_Replacement}, M_I) {
- // silent E replacement??
- forward_eviction_to_cpu;
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateL1CacheBlock;
- }
-
- transition(E, Inv, I) {
- // don't send data
- forward_eviction_to_cpu;
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(E, Fwd_GETX, I) {
- forward_eviction_to_cpu;
- d_sendDataToRequestor;
- l_popRequestQueue;
- }
-
- transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popRequestQueue;
- }
-
- // Transitions from Modified
-
- transition(M, {L1_Replacement, PF_L1_Replacement}, M_I) {
- forward_eviction_to_cpu;
- i_allocateTBE;
- g_issuePUTX; // send data, but hold in case forwarded request
- ff_deallocateL1CacheBlock;
- }
-
- transition(M_I, WB_Ack, I) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(M, Inv, I) {
- forward_eviction_to_cpu;
- f_sendDataToL2;
- l_popRequestQueue;
- }
-
- transition(M_I, Inv, SINK_WB_ACK) {
- ft_sendDataToL2_fromTBE;
- l_popRequestQueue;
- }
-
- transition(M, Fwd_GETX, I) {
- forward_eviction_to_cpu;
- d_sendDataToRequestor;
- l_popRequestQueue;
- }
-
- transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
- d_sendDataToRequestor;
- d2_sendDataToL2;
- l_popRequestQueue;
- }
-
- transition(M_I, Fwd_GETX, SINK_WB_ACK) {
- dt_sendDataToRequestor_fromTBE;
- l_popRequestQueue;
- }
-
- transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
- dt_sendDataToRequestor_fromTBE;
- d2t_sendDataToL2_fromTBE;
- l_popRequestQueue;
- }
-
- // Transitions from IS
- transition({IS, IS_I}, Inv, IS_I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(IS, Data_all_Acks, S) {
- u_writeDataToL1Cache;
- hx_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_IS, Data_all_Acks, S) {
- u_writeDataToL1Cache;
- s_deallocateTBE;
- mp_markPrefetched;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS_I, Data_all_Acks, I) {
- u_writeDataToL1Cache;
- hx_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_IS_I, Data_all_Acks, I) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, DataS_fromL1, S) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- hx_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_IS, DataS_fromL1, S) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS_I, DataS_fromL1, I) {
- u_writeDataToL1Cache;
- j_sendUnblock;
- hx_load_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_IS_I, DataS_fromL1, I) {
- j_sendUnblock;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- // directory is blocked when sending exclusive data
- transition(IS_I, Data_Exclusive, E) {
- u_writeDataToL1Cache;
- hx_load_hit;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- // directory is blocked when sending exclusive data
- transition(PF_IS_I, Data_Exclusive, E) {
- u_writeDataToL1Cache;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Data_Exclusive, E) {
- u_writeDataToL1Cache;
- hx_load_hit;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_IS, Data_Exclusive, E) {
- u_writeDataToL1Cache;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- mp_markPrefetched;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from IM
- transition(IM, Inv, IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition({PF_IM, PF_SM}, Inv, PF_IM) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(IM, Data, SM) {
- u_writeDataToL1Cache;
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(PF_IM, Data, PF_SM) {
- u_writeDataToL1Cache;
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(IM, Data_all_Acks, M) {
- u_writeDataToL1Cache;
- hhx_store_hit;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_IM, Data_all_Acks, M) {
- u_writeDataToL1Cache;
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- mp_markPrefetched;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from SM
- transition(SM, Inv, IM) {
- forward_eviction_to_cpu;
- fi_sendInvAck;
- dg_invalidate_sc;
- l_popRequestQueue;
- }
-
- transition({SM, IM, PF_SM, PF_IM}, Ack) {
- q_updateAckCount;
- o_popIncomingResponseQueue;
- }
-
- transition(SM, Ack_all, M) {
- jj_sendExclusiveUnblock;
- hhx_store_hit;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(PF_SM, Ack_all, M) {
- jj_sendExclusiveUnblock;
- s_deallocateTBE;
- mp_markPrefetched;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(SINK_WB_ACK, Inv) {
- fi_sendInvAck;
- l_popRequestQueue;
- }
-
- transition(SINK_WB_ACK, WB_Ack, I) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L2Cache, "MESI Directory L2 Cache CMP")
- : CacheMemory * L2cache;
- Cycles l2_request_latency := 2;
- Cycles l2_response_latency := 2;
- Cycles to_l1_latency := 1;
-
- // Message Queues
- // From local bank of L2 cache TO the network
- MessageBuffer * DirRequestFromL2Cache, network="To", virtual_network="0",
- vnet_type="request"; // this L2 bank -> Memory
-
- MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="2",
- vnet_type="request"; // this L2 bank -> a local L1
-
- MessageBuffer * responseFromL2Cache, network="To", virtual_network="1",
- vnet_type="response"; // this L2 bank -> a local L1 || Memory
-
- // FROM the network to this local bank of L2 cache
- MessageBuffer * unblockToL2Cache, network="From", virtual_network="2",
- vnet_type="unblock"; // a local L1 || Memory -> this L2 bank
-
- MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
- vnet_type="request"; // a local L1 -> this L2 bank
-
- MessageBuffer * responseToL2Cache, network="From", virtual_network="1",
- vnet_type="response"; // a local L1 || Memory -> this L2 bank
-{
- // STATES
- state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
- // Base states
- NP, AccessPermission:Invalid, desc="Not present in either cache";
- SS, AccessPermission:Read_Only, desc="L2 cache entry Shared, also present in one or more L1s";
- M, AccessPermission:Read_Write, desc="L2 cache entry Modified, not present in any L1s", format="!b";
- MT, AccessPermission:Maybe_Stale, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
-
- // L2 replacement
- M_I, AccessPermission:Busy, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
- MT_I, AccessPermission:Busy, desc="L2 cache replacing, getting data from exclusive";
- MCT_I, AccessPermission:Busy, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
- I_I, AccessPermission:Busy, desc="L2 replacing clean data, need to inv sharers and then drop data";
- S_I, AccessPermission:Busy, desc="L2 replacing dirty data, collecting acks from L1s";
-
- // Transient States for fetching data from memory
- ISS, AccessPermission:Busy, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
- IS, AccessPermission:Busy, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
- IM, AccessPermission:Busy, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
-
- // Blocking states
- SS_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from SS";
- MT_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from MT";
-
- MT_IIB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
- MT_IB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
- MT_SB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
- }
-
- // EVENTS
- enumeration(Event, desc="L2 Cache events") {
- // L2 events
-
- // events initiated by the local L1s
- L1_GET_INSTR, desc="an L1I GET_INSTR request for a block mapped to us";
- L1_GETS, desc="an L1D GETS request for a block mapped to us";
- L1_GETX, desc="an L1D GETX request for a block mapped to us";
- L1_UPGRADE, desc="an L1D UPGRADE request for a block mapped to us";
-
- L1_PUTX, desc="L1 replacing data";
- L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
-
- // events initiated by this L2
- L2_Replacement, desc="L2 Replacement", format="!r";
- L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
-
- // events from memory controller
- Mem_Data, desc="data from memory", format="!r";
- Mem_Ack, desc="ack from memory", format="!r";
-
- // M->S data writeback
- WB_Data, desc="data from L1";
- WB_Data_clean, desc="clean data from L1";
- Ack, desc="writeback ack";
- Ack_all, desc="last writeback ack";
-
- Unblock, desc="Unblock from L1 requestor";
- Exclusive_Unblock, desc="Exclusive unblock from L1 requestor";
-
- MEM_Inv, desc="Invalidation from directory";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- NetDest Sharers, desc="tracks the on-chip L1 sharers";
- MachineID Exclusive, desc="Exclusive holder of block";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, default="false", desc="data is dirty";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, default="false", desc="Data is Dirty";
-
- NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
- MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
- int pendingAcks, desc="number of pending acks for invalidates during writeback";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- Cycles ticksToCycles(Tick t);
-
- void set_cache_entry(AbstractCacheEntry a);
- void unset_cache_entry();
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpBuffers(Addr a);
- void profileMsgDelay(int virtualNetworkType, Cycles c);
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // inclusive cache, returns L2 entries only
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache[addr]);
- }
-
- bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
- if (is_valid(cache_entry)) {
- return cache_entry.Sharers.isElement(requestor);
- } else {
- return false;
- }
- }
-
- void addSharer(Addr addr, MachineID requestor, Entry cache_entry) {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %#x\n",
- machineID, requestor, addr);
- cache_entry.Sharers.add(requestor);
- }
-
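isSharer/addSharer maintain the inclusive directory state that lives alongside each L2 line: the set of on-chip L1s holding a copy. A compact sketch of that bookkeeping, using a bitset where Ruby uses the richer NetDest type:

```cpp
#include <bitset>

// Sketch of per-line sharer tracking; L1s are identified here by a
// dense index, whereas Ruby's NetDest holds full MachineIDs.
constexpr int kMaxL1s = 64;

struct SharerSet {
    std::bitset<kMaxL1s> bits;

    void add(int l1)             { bits.set(l1); }          // addSharer
    void remove(int l1)          { bits.reset(l1); }        // kk_removeRequestSharer
    bool isElement(int l1) const { return bits.test(l1); }  // isSharer
    int  count() const           { return static_cast<int>(bits.count()); }
    void clear()                 { bits.reset(); }          // ll_clearSharers
};
```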
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:NP;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- // MUST CHANGE
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
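getState/setState implement the usual SLICC convention: while a TBE exists, its transient state is authoritative; otherwise the cache entry's stable state applies; and absence of both means not-present. A sketch of that resolution order:

```cpp
// Sketch of the TBE-first state resolution in getState (setState
// symmetrically writes whichever of the two is valid).
enum class L2State { NP, SS, M, MT /* transient states elided */ };

struct TbeView   { L2State state; };
struct EntryView { L2State state; };

L2State resolveState(const TbeView* tbe, const EntryView* entry)
{
    if (tbe)
        return tbe->state;    // in-flight transaction: transient state wins
    if (entry)
        return entry->state;  // stable cached state
    return L2State::NP;       // not present in this L2 bank
}
```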
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
- return L2Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(cache_entry.CacheState));
- return L2Cache_State_to_permission(cache_entry.CacheState);
- }
-
- DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L2Cache_State_to_permission(state));
- }
- }
-
- Event L1Cache_request_type_to_event(CoherenceRequestType type, Addr addr,
- MachineID requestor, Entry cache_entry) {
- if(type == CoherenceRequestType:GETS) {
- return Event:L1_GETS;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return Event:L1_GET_INSTR;
- } else if (type == CoherenceRequestType:GETX) {
- return Event:L1_GETX;
- } else if (type == CoherenceRequestType:UPGRADE) {
- if ( is_valid(cache_entry) && cache_entry.Sharers.isElement(requestor) ) {
- return Event:L1_UPGRADE;
- } else {
- return Event:L1_GETX;
- }
- } else if (type == CoherenceRequestType:PUTX) {
- if (isSharer(addr, requestor, cache_entry)) {
- return Event:L1_PUTX;
- } else {
- return Event:L1_PUTX_old;
- }
- } else {
- DPRINTF(RubySlicc, "address: %#x, Request Type: %s\n", addr, type);
- error("Invalid L1 forwarded request type");
- }
- }
-
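L1Cache_request_type_to_event folds two races into the event mapping: an UPGRADE whose requestor is no longer a sharer (its copy was invalidated while the request was in flight) is promoted to a full GETX, and a PUTX from a non-sharer is classified as stale. A sketch of just that mapping:

```cpp
// Sketch of the race handling in L1Cache_request_type_to_event.
enum class ReqType { GETS, GET_INSTR, GETX, UPGRADE, PUTX };
enum class L2Event { L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE,
                     L1_PUTX, L1_PUTX_old };

L2Event toEvent(ReqType type, bool requestorIsSharer)
{
    switch (type) {
    case ReqType::GETS:      return L2Event::L1_GETS;
    case ReqType::GET_INSTR: return L2Event::L1_GET_INSTR;
    case ReqType::GETX:      return L2Event::L1_GETX;
    case ReqType::UPGRADE:   // lost the copy in flight -> needs full GETX
        return requestorIsSharer ? L2Event::L1_UPGRADE : L2Event::L1_GETX;
    case ReqType::PUTX:      // writeback from a stale (former) owner
        return requestorIsSharer ? L2Event::L1_PUTX : L2Event::L1_PUTX_old;
    }
    return L2Event::L1_GETS;  // unreachable
}
```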
- int getPendingAcks(TBE tbe) {
- return tbe.pendingAcks;
- }
-
- bool isDirty(Entry cache_entry) {
- assert(is_valid(cache_entry));
- return cache_entry.Dirty;
- }
-
- // ** OUT_PORTS **
-
- out_port(L1RequestL2Network_out, RequestMsg, L1RequestFromL2Cache);
- out_port(DirRequestL2Network_out, RequestMsg, DirRequestFromL2Cache);
- out_port(responseL2Network_out, ResponseMsg, responseFromL2Cache);
-
-
- in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
- if(L1unblockNetwork_in.isReady(clockEdge())) {
- peek(L1unblockNetwork_in, ResponseMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
- DPRINTF(RubySlicc, "Addr: %#x State: %s Sender: %s Type: %s Dest: %s\n",
- in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
- in_msg.Sender, in_msg.Type, in_msg.Destination);
-
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
- trigger(Event:Exclusive_Unblock, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- trigger(Event:Unblock, in_msg.addr, cache_entry, tbe);
- } else {
- error("unknown unblock message");
- }
- }
- }
- }
-
- // Response L2 Network - response msg to this particular L2 bank
- in_port(responseL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
- if (responseL2Network_in.isReady(clockEdge())) {
- peek(responseL2Network_in, ResponseMsg) {
- // test whether it's from a local L1 or an off-chip source
- assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
- if(in_msg.Type == CoherenceResponseType:DATA) {
- if (in_msg.Dirty) {
- trigger(Event:WB_Data, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:WB_Data_clean, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
- trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
- }
- } else {
- error("unknown message type");
- }
-
- } else { // external message
- if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Mem_Data, in_msg.addr, cache_entry, tbe);
- } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
- trigger(Event:Mem_Ack, in_msg.addr, cache_entry, tbe);
- } else if(in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:MEM_Inv, in_msg.addr, cache_entry, tbe);
- } else {
- error("unknown message type");
- }
- }
- }
- } // if not ready, do nothing
- }
-
- // L1 Request
- in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
- if(L1RequestL2Network_in.isReady(clockEdge())) {
- peek(L1RequestL2Network_in, RequestMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- DPRINTF(RubySlicc, "Addr: %#x State: %s Req: %s Type: %s Dest: %s\n",
- in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
- in_msg.Requestor, in_msg.Type, in_msg.Destination);
-
- assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
- assert(in_msg.Destination.isElement(machineID));
-
- if (is_valid(cache_entry)) {
- // The L2 contains the block, so proceed with handling the request
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.addr,
- in_msg.Requestor, cache_entry),
- in_msg.addr, cache_entry, tbe);
- } else {
- if (L2cache.cacheAvail(in_msg.addr)) {
- // L2 doesn't have the line, but we have space for it in the L2
- trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.addr,
- in_msg.Requestor, cache_entry),
- in_msg.addr, cache_entry, tbe);
- } else {
- // No room in the L2, so we need to make room before handling the request
- Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
- if (isDirty(L2cache_entry)) {
- trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
- } else {
- trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
- }
- }
- }
- }
- }
- }
-
-
- // ACTIONS
-
- action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(DirRequestL2Network_out, RequestMsg, l2_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
- }
-
- action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(cache_entry.Exclusive);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
- }
-
- action(c_exclusiveReplacement, "c", desc="Send data to memory") {
- enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(c_exclusiveCleanReplacement, "cc", desc="Send ack to memory for clean replacement") {
- enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
- enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- out_msg.AckCount := 0 - cache_entry.Sharers.count();
- if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- }
- }
- }
-
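d_sendDataToRequestor stamps the data response with a *negative* AckCount: minus the number of sharers that will be invalidated, plus one when the requestor is itself a sharer (it never acks itself, matching the comment in ts_sendInvAckToUpgrader). Each L1 invalidation ack then carries +1, so the requestor's TBE simply accumulates until the sum cancels to zero, regardless of whether the data or the acks arrive first. A worked sketch:

```cpp
#include <cassert>

// Sketch of the negative-AckCount convention between the L2's data
// response and the L1 invalidation acks.
int dataAckCount(int sharerCount, bool requestorIsSharer)
{
    return -sharerCount + (requestorIsSharer ? 1 : 0);
}

int main()
{
    int pending = 0;                   // tbe.pendingAcks at the requestor
    pending -= dataAckCount(3, true);  // data arrives: 2 acks still owed
    assert(pending == 2);
    pending -= 1;                      // first inv ack (AckCount = 1)
    pending -= 1;                      // second inv ack
    assert(pending == 0);              // Data_all_Acks / Ack_all condition
    return 0;
}
```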
- action(dd_sendExclusiveDataToRequestor, "dd", desc="Send data from cache to requestor") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- out_msg.AckCount := 0 - cache_entry.Sharers.count();
- if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
- out_msg.AckCount := out_msg.AckCount + 1;
- }
- }
- }
- }
-
- action(ds_sendSharedDataToRequestor, "ds", desc="Send data from cache to requestor") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.AckCount := 0;
- }
- }
- }
-
- action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
- assert(is_valid(tbe));
- assert(tbe.L1_GetS_IDs.count() > 0);
- enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
- assert(is_valid(tbe));
- assert(tbe.L1_GetS_IDs.count() == 1);
- enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
- enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
- assert(is_valid(tbe));
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(tbe.L1_GetX_ID);
- DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
- out_msg.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "Address: %#x, Destination: %s, DataBlock: %s\n",
- out_msg.addr, out_msg.Destination, out_msg.DataBlk);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
- enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.Destination := cache_entry.Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := cache_entry.Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
- }
-
- action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := cache_entry.Sharers;
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
- }
-
- // OTHER ACTIONS
- action(i_allocateTBE, "i", desc="Allocate TBE for request") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.L1_GetS_IDs.clear();
- tbe.DataBlk := cache_entry.DataBlk;
- tbe.Dirty := cache_entry.Dirty;
- tbe.pendingAcks := cache_entry.Sharers.count();
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- Tick delay := L1RequestL2Network_in.dequeue(clockEdge());
- profileMsgDelay(0, ticksToCycles(delay));
- }
-
- action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- Tick delay := L1unblockNetwork_in.dequeue(clockEdge());
- profileMsgDelay(0, ticksToCycles(delay));
- }
-
- action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- Tick delay := responseL2Network_in.dequeue(clockEdge());
- profileMsgDelay(1, ticksToCycles(delay));
- }
-
- action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
- peek(responseL2Network_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- if (in_msg.Dirty) {
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from request queue to cache") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(cache_entry));
- if (in_msg.Dirty) {
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(q_updateAck, "q", desc="update pending ack count") {
- peek(responseL2Network_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
- APPEND_TRANSITION_COMMENT(in_msg.AckCount);
- APPEND_TRANSITION_COMMENT(" p: ");
- APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
- }
- }
-
- action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
- peek(responseL2Network_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- }
- }
-
- action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.L1_GetS_IDs.add(in_msg.Requestor);
- }
- }
-
- action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.L1_GetX_ID := in_msg.Requestor;
- }
- }
-
- action(set_setMRU, "\set", desc="set the MRU entry") {
- L2cache.setMRU(address);
- }
-
- action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
- }
-
- action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cache.deallocate(address);
- unset_cache_entry();
- }
-
- action(t_sendWBAck, "t", desc="Send writeback ACK") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:WB_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
- peek(L1RequestL2Network_in, RequestMsg) {
- enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- // upgrader doesn't get ack from itself, hence the + 1
- out_msg.AckCount := 0 - cache_entry.Sharers.count() + 1;
- }
- }
- }
-
- action(uu_profileMiss, "\um", desc="Profile the demand miss") {
- ++L2cache.demand_misses;
- }
-
- action(uu_profileHit, "\uh", desc="Profile the demand hit") {
- ++L2cache.demand_hits;
- }
-
- action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(cache_entry));
- addSharer(address, in_msg.Requestor, cache_entry);
- APPEND_TRANSITION_COMMENT( cache_entry.Sharers );
- }
- }
-
- action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- addSharer(address, in_msg.Sender, cache_entry);
- }
- }
-
- action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(cache_entry));
- cache_entry.Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(cache_entry));
- cache_entry.Sharers.clear();
- }
- }
-
- action(mm_markExclusive, "\m", desc="set the exclusive owner") {
- peek(L1RequestL2Network_in, RequestMsg) {
- assert(is_valid(cache_entry));
- cache_entry.Sharers.clear();
- cache_entry.Exclusive := in_msg.Requestor;
- addSharer(address, in_msg.Requestor, cache_entry);
- }
- }
-
- action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
- peek(L1unblockNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.Sharers.clear();
- cache_entry.Exclusive := in_msg.Sender;
- addSharer(address, in_msg.Sender, cache_entry);
- }
- }
-
- action(zz_stallAndWaitL1RequestQueue, "zz", desc="recycle L1 request queue") {
- stall_and_wait(L1RequestL2Network_in, address);
- }
-
- action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
- responseL2Network_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpBuffers(address);
- }
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
-
- //===============================================
- // BASE STATE - I
-
- // Transitions from I (Idle)
- transition({NP, IS, ISS, IM, SS, M, M_I, I_I, S_I, MT_IB, MT_SB}, L1_PUTX) {
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
- transition({NP, SS, M, MT, M_I, I_I, S_I, IS, ISS, IM, MT_IB, MT_SB}, L1_PUTX_old) {
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
- transition({IM, IS, ISS, SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- transition({IM, IS, ISS, SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
- zn_recycleResponseNetwork;
- }
-
- transition({I_I, S_I, M_I, MT_I, MCT_I, NP}, MEM_Inv) {
- o_popIncomingResponseQueue;
- }
-
-
- transition({SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
-
- transition(NP, L1_GETS, ISS) {
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- nn_addSharer;
- i_allocateTBE;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition(NP, L1_GET_INSTR, IS) {
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- nn_addSharer;
- i_allocateTBE;
- ss_recordGetSL1ID;
- a_issueFetchToMemory;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition(NP, L1_GETX, IM) {
- qq_allocateL2CacheBlock;
- ll_clearSharers;
- // nn_addSharer;
- i_allocateTBE;
- xx_recordGetXL1ID;
- a_issueFetchToMemory;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
-
- // transitions from IS/IM
-
- transition(ISS, Mem_Data, MT_MB) {
- m_writeDataToCache;
- ex_sendExclusiveDataToGetSRequestors;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(IS, Mem_Data, SS) {
- m_writeDataToCache;
- e_sendDataToGetSRequestors;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IM, Mem_Data, MT_MB) {
- m_writeDataToCache;
- ee_sendDataToGetXRequestor;
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- }
-
- transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}, IS) {
- nn_addSharer;
- ss_recordGetSL1ID;
- uu_profileMiss;
- jj_popL1RequestQueue;
- }
-
- transition({IS, ISS}, L1_GETX) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- // transitions from SS
- transition(SS, {L1_GETS, L1_GET_INSTR}) {
- ds_sendSharedDataToRequestor;
- nn_addSharer;
- set_setMRU;
- uu_profileHit;
- jj_popL1RequestQueue;
- }
-
-
- transition(SS, L1_GETX, SS_MB) {
- d_sendDataToRequestor;
- // fw_sendFwdInvToSharers;
- fwm_sendFwdInvToSharersMinusRequestor;
- set_setMRU;
- uu_profileHit;
- jj_popL1RequestQueue;
- }
-
- transition(SS, L1_UPGRADE, SS_MB) {
- fwm_sendFwdInvToSharersMinusRequestor;
- ts_sendInvAckToUpgrader;
- set_setMRU;
- uu_profileHit;
- jj_popL1RequestQueue;
- }
-
- transition(SS, L2_Replacement_clean, I_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SS, {L2_Replacement, MEM_Inv}, S_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
-
- transition(M, L1_GETX, MT_MB) {
- d_sendDataToRequestor;
- set_setMRU;
- uu_profileHit;
- jj_popL1RequestQueue;
- }
-
- transition(M, L1_GET_INSTR, SS) {
- d_sendDataToRequestor;
- nn_addSharer;
- set_setMRU;
- uu_profileHit;
- jj_popL1RequestQueue;
- }
-
- transition(M, L1_GETS, MT_MB) {
- dd_sendExclusiveDataToRequestor;
- set_setMRU;
- uu_profileHit;
- jj_popL1RequestQueue;
- }
-
- transition(M, {L2_Replacement, MEM_Inv}, M_I) {
- i_allocateTBE;
- c_exclusiveReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement_clean, M_I) {
- i_allocateTBE;
- c_exclusiveCleanReplacement;
- rr_deallocateL2CacheBlock;
- }
-
-
- // transitions from MT
-
- transition(MT, L1_GETX, MT_MB) {
- b_forwardRequestToExclusive;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
-
- transition(MT, {L1_GETS, L1_GET_INSTR}, MT_IIB) {
- b_forwardRequestToExclusive;
- uu_profileMiss;
- set_setMRU;
- jj_popL1RequestQueue;
- }
-
- transition(MT, {L2_Replacement, MEM_Inv}, MT_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L2_Replacement_clean, MCT_I) {
- i_allocateTBE;
- f_sendInvToSharers;
- rr_deallocateL2CacheBlock;
- }
-
- transition(MT, L1_PUTX, M) {
- ll_clearSharers;
- mr_writeDataToCacheFromRequest;
- t_sendWBAck;
- jj_popL1RequestQueue;
- }
-
- transition({SS_MB,MT_MB}, Exclusive_Unblock, MT) {
- // update actual directory
- mmu_markExclusiveFromUnblock;
- k_popUnblockQueue;
- kd_wakeUpDependents;
- }
-
-  transition(MT_IIB, {L1_PUTX, L1_PUTX_old}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- transition(MT_IIB, Unblock, MT_IB) {
- nnu_addSharerFromUnblock;
- k_popUnblockQueue;
- }
-
- transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- }
-
- transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
- m_writeDataToCache;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(MT_SB, Unblock, SS) {
- nnu_addSharerFromUnblock;
- k_popUnblockQueue;
- kd_wakeUpDependents;
- }
-
- // writeback states
- transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- transition(I_I, Ack) {
- q_updateAck;
- o_popIncomingResponseQueue;
- }
-
- transition(I_I, Ack_all, M_I) {
- c_exclusiveCleanReplacement;
- o_popIncomingResponseQueue;
- }
-
- transition({MT_I, MCT_I}, WB_Data, M_I) {
- qq_writeDataToTBE;
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(MCT_I, {WB_Data_clean, Ack_all}, M_I) {
- c_exclusiveCleanReplacement;
- o_popIncomingResponseQueue;
- }
-
-  transition(MCT_I, {L1_PUTX, L1_PUTX_old}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
-  // The L1 never modified the dirty data
- transition(MT_I, {WB_Data_clean, Ack_all}, M_I) {
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
-  transition(MT_I, {L1_PUTX, L1_PUTX_old}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- // possible race between unblock and immediate replacement
- transition({MT_MB,SS_MB}, {L1_PUTX, L1_PUTX_old}) {
- zz_stallAndWaitL1RequestQueue;
- }
-
- transition(S_I, Ack) {
- q_updateAck;
- o_popIncomingResponseQueue;
- }
-
- transition(S_I, Ack_all, M_I) {
- ct_exclusiveReplacementFromTBE;
- o_popIncomingResponseQueue;
- }
-
- transition(M_I, Mem_Ack, NP) {
- s_deallocateTBE;
- o_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:Directory, "MESI Two Level directory protocol")
- : DirectoryMemory * directory;
- Cycles to_mem_ctrl_latency := 1;
- Cycles directory_latency := 6;
-
- MessageBuffer * requestToDir, network="From", virtual_network="0",
- vnet_type="request";
- MessageBuffer * responseToDir, network="From", virtual_network="1",
- vnet_type="response";
- MessageBuffer * responseFromDir, network="To", virtual_network="1",
- vnet_type="response";
-
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
- ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
- ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
-
- M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
- IM, AccessPermission:Busy, desc="Intermediate State I>M";
- MI, AccessPermission:Busy, desc="Intermediate State M>I";
-    M_DRD, AccessPermission:Busy, desc="Intermediate state when a DMA read finds the line in M";
-    M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";
-    M_DWR, AccessPermission:Busy, desc="Intermediate state when a DMA write finds the line in M";
-    M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- Fetch, desc="A memory fetch arrives";
- Data, desc="writeback data arrives";
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
-//added by SS for dma
- DMA_READ, desc="A DMA Read memory request";
- DMA_WRITE, desc="A DMA Write memory request";
- CleanReplacement, desc="Clean Replacement in L2 cache";
-
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- MachineID Owner;
- }
-
- // TBE entries for DMA requests
- structure(TBE, desc="TBE entries for outstanding DMA requests") {
- Addr PhysicalAddress, desc="physical address";
- State TBEState, desc="Transient State";
- DataBlock DataBlk, desc="Data to be written (DMA write only)";
-    int Len, desc="Length of the DMA request";
- MachineID Requestor, desc="The DMA engine that sent the request";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- bool functionalRead(Packet *pkt);
- int functionalWrite(Packet *pkt);
- }
-
-
- // ** OBJECTS **
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- void set_tbe(TBE tbe);
- void unset_tbe();
- void wakeUpBuffers(Addr a);
-
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
-
- if (is_valid(dir_entry)) {
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (directory.isPresent(addr)) {
- return getDirectoryEntry(addr).DirectoryState;
- } else {
- return State:I;
- }
- }
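
getState fixes the lookup order used throughout this machine: a valid TBE, when one exists, overrides the stable directory entry, and an absent entry defaults to I. A minimal sketch of that ordering in plain C++ (names hypothetical, not gem5 code):

#include <cstdint>
#include <iostream>
#include <unordered_map>

enum class State { I, M, IM, MI };

struct Directory {
    std::unordered_map<std::uint64_t, State> stable;     // directory entries
    std::unordered_map<std::uint64_t, State> transient;  // TBEs in flight

    State getState(std::uint64_t addr) const {
        auto t = transient.find(addr);
        if (t != transient.end())
            return t->second;        // a valid TBE always wins
        auto s = stable.find(addr);
        if (s != stable.end())
            return s->second;        // otherwise the stable directory state
        return State::I;             // not present: treat as Idle
    }
};

int main() {
    Directory d;
    d.stable[0x40] = State::M;
    d.transient[0x40] = State::MI;   // a writeback is in flight
    std::cout << (d.getState(0x40) == State::MI) << "\n";  // prints 1
    return 0;
}
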
-
- void setState(TBE tbe, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (directory.isPresent(addr)) {
- getDirectoryEntry(addr).DirectoryState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
- return Directory_State_to_permission(tbe.TBEState);
- }
-
- if(directory.isPresent(addr)) {
- DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(Addr addr, State state) {
- if (directory.isPresent(addr)) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
- }
-
- bool isGETRequest(CoherenceRequestType type) {
- return (type == CoherenceRequestType:GETS) ||
- (type == CoherenceRequestType:GET_INSTR) ||
- (type == CoherenceRequestType:GETX);
- }
-
- // ** OUT_PORTS **
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-
- // ** IN_PORTS **
-
- in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
- trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
- trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
-
- // Actions
- action(a_sendAck, "a", desc="Send ack to L2") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:MEMORY_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Sender);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:MEMORY_DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
-
- Entry e := getDirectoryEntry(in_msg.addr);
- e.Owner := in_msg.OriginalRequestorMachId;
- }
- }
- }
-
- action(aa_sendAck, "aa", desc="Send ack to L2") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:MEMORY_ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
-  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpBuffers(address);
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestNetwork_in, RequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
- }
- }
-
- action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
- peek(responseNetwork_in, ResponseMsg) {
- queueMemoryWrite(in_msg.Sender, address, to_mem_ctrl_latency,
- in_msg.DataBlk);
- }
- }
-
-//added by SS for dma
-  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
- peek(requestNetwork_in, RequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
- }
- }
-
- action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
- out_msg.Destination.add(tbe.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(qw_queueMemoryWBRequest_partial, "qwp",
- desc="Queue off-chip writeback request") {
- peek(requestNetwork_in, RequestMsg) {
- queueMemoryWritePartial(machineID, address, to_mem_ctrl_latency,
- in_msg.DataBlk, in_msg.Len);
- }
- }
-
- action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
- enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Destination.add(tbe.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
-  action(z_stallAndWaitRequest, "z", desc="stall and wait the request queue") {
- stall_and_wait(requestNetwork_in, address);
- }
-
- action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:INV;
- out_msg.Sender := machineID;
- out_msg.Destination.add(getDirectoryEntry(address).Owner);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
-
- action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
- out_msg.Destination.add(tbe.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE") {
- peek(requestNetwork_in, RequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DataBlk := in_msg.DataBlk;
- tbe.PhysicalAddress := in_msg.addr;
- tbe.Len := in_msg.Len;
- tbe.Requestor := in_msg.Requestor;
- }
- }
-
- action(qw_queueMemoryWBRequest_partialTBE, "qwt",
- desc="Queue off-chip writeback request") {
- peek(responseNetwork_in, ResponseMsg) {
- queueMemoryWritePartial(in_msg.Sender, tbe.PhysicalAddress,
- to_mem_ctrl_latency, tbe.DataBlk, tbe.Len);
- }
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
-
- // TRANSITIONS
-
- transition(I, Fetch, IM) {
- qf_queueMemoryFetchRequest;
- j_popIncomingRequestQueue;
- }
-
- transition(M, Fetch) {
- inv_sendCacheInvalidate;
- z_stallAndWaitRequest;
- }
-
- transition(IM, Memory_Data, M) {
- d_sendData;
- l_popMemQueue;
- kd_wakeUpDependents;
- }
-//added by SS
- transition(M, CleanReplacement, I) {
- a_sendAck;
- k_popIncomingResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(M, Data, MI) {
- qw_queueMemoryWBRequest;
- k_popIncomingResponseQueue;
- }
-
- transition(MI, Memory_Ack, I) {
- aa_sendAck;
- l_popMemQueue;
- kd_wakeUpDependents;
- }
-
-
-//added by SS for dma support
- transition(I, DMA_READ, ID) {
- v_allocateTBE;
- qf_queueMemoryFetchRequestDMA;
- j_popIncomingRequestQueue;
- }
-
- transition(ID, Memory_Data, I) {
- dr_sendDMAData;
- w_deallocateTBE;
- l_popMemQueue;
- kd_wakeUpDependents;
- }
-
- transition(I, DMA_WRITE, ID_W) {
- v_allocateTBE;
- qw_queueMemoryWBRequest_partial;
- j_popIncomingRequestQueue;
- }
-
- transition(ID_W, Memory_Ack, I) {
- da_sendDMAAck;
- w_deallocateTBE;
- l_popMemQueue;
- kd_wakeUpDependents;
- }
-
-  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
- z_stallAndWaitRequest;
- }
-
-  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
- zz_recycleDMAQueue;
- }
-
-
- transition(M, DMA_READ, M_DRD) {
- v_allocateTBE;
- inv_sendCacheInvalidate;
- j_popIncomingRequestQueue;
- }
-
- transition(M_DRD, Data, M_DRDI) {
- drp_sendDMAData;
- w_deallocateTBE;
- qw_queueMemoryWBRequest;
- k_popIncomingResponseQueue;
- }
-
- transition(M_DRDI, Memory_Ack, I) {
- aa_sendAck;
- l_popMemQueue;
- kd_wakeUpDependents;
- }
-
- transition(M, DMA_WRITE, M_DWR) {
- v_allocateTBE;
- inv_sendCacheInvalidate;
- j_popIncomingRequestQueue;
- }
-
- transition(M_DWR, Data, M_DWRI) {
- qw_queueMemoryWBRequest_partialTBE;
- k_popIncomingResponseQueue;
- }
-
- transition(M_DWRI, Memory_Ack, I) {
- aa_sendAck;
- da_sendDMAAck;
- w_deallocateTBE;
- l_popMemQueue;
- kd_wakeUpDependents;
- }
-}
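
Taken together, the DMA transitions above implement two round trips: from I the access goes straight to memory (I -> ID or ID_W -> I), while from M the owner is invalidated first and its writeback both services the DMA and is flushed back to memory (M -> M_DRD/M_DWR -> M_DRDI/M_DWRI -> I). A condensed replay of the M-state read path, as a plain C++ sketch of the transition table (not SLICC; action strings abbreviated):

#include <iostream>
#include <map>
#include <string>
#include <utility>

int main() {
    // (state, event) -> (next state, action) for the DMA-related transitions.
    std::map<std::pair<std::string, std::string>,
             std::pair<std::string, std::string>> t = {
        {{"I", "DMA_READ"},        {"ID",     "queue memory fetch"}},
        {{"ID", "Memory_Data"},    {"I",      "send data to DMA"}},
        {{"I", "DMA_WRITE"},       {"ID_W",   "queue partial writeback"}},
        {{"ID_W", "Memory_Ack"},   {"I",      "ack DMA"}},
        {{"M", "DMA_READ"},        {"M_DRD",  "invalidate owner"}},
        {{"M_DRD", "Data"},        {"M_DRDI", "send data to DMA, write back"}},
        {{"M_DRDI", "Memory_Ack"}, {"I",      "ack L2"}},
    };

    std::string state = "M";
    for (std::string ev : {"DMA_READ", "Data", "Memory_Ack"}) {
        auto [nxt, action] = t.at({state, ev});
        std::cout << state << " --" << ev << "--> " << nxt
                  << "  (" << action << ")\n";
        state = nxt;
    }
    return 0;
}
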
+++ /dev/null
-/*
- * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
- * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:DMA, "DMA Controller")
-  : DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-
- MessageBuffer * responseFromDir, network="From", virtual_network="1",
- vnet_type="response";
- MessageBuffer * requestToDir, network="To", virtual_network="0",
- vnet_type="request";
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="DMA states", default="DMA_State_READY") {
- READY, AccessPermission:Invalid, desc="Ready to accept a new request";
- BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
- BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
- }
-
- enumeration(Event, desc="DMA events") {
- ReadRequest, desc="A new read request";
- WriteRequest, desc="A new write request";
- Data, desc="Data from a DMA memory read";
- Ack, desc="DMA write to memory completed";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Data";
- }
-
- structure(TBETable, external = "yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
-
- TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else {
- return State:READY;
- }
- }
-
- void setState(TBE tbe, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("DMA does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("DMA does not support functional write.");
- }
-
- out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
-
- in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, SequencerMsg) {
-      if (in_msg.Type == SequencerRequestType:LD) {
- trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == SequencerRequestType:ST) {
- trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid request type");
- }
- }
- }
- }
-
- in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady(clockEdge())) {
-      peek(dmaResponseQueue_in, ResponseMsg) {
- if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else {
- error("Invalid response type");
- }
- }
- }
- }
-
- action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(requestToDir_out, RequestMsg, request_latency) {
- out_msg.addr := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:DMA_READ;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(requestToDir_out, RequestMsg, request_latency) {
- out_msg.addr := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:DMA_WRITE;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
- dma_sequencer.ackCallback(address);
- }
-
- action(d_dataCallback, "d", desc="Write data to dma sequencer") {
- dma_sequencer.dataCallback(tbe.DataBlk, address);
- }
-
- action(t_updateTBEData, "t", desc="Update TBE Data") {
- assert(is_valid(tbe));
-    peek(dmaResponseQueue_in, ResponseMsg) {
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE entry") {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
-  action(p_popResponseQueue, "\p", desc="Pop response queue") {
- dmaResponseQueue_in.dequeue(clockEdge());
- }
-
-  action(zz_stallAndWaitRequestQueue, "zz", desc="stall and wait the DMA request queue") {
- stall_and_wait(dmaRequestQueue_in, address);
- }
-
- action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
- transition(READY, ReadRequest, BUSY_RD) {
- v_allocateTBE;
- s_sendReadRequest;
- p_popRequestQueue;
- }
-
- transition(READY, WriteRequest, BUSY_WR) {
- v_allocateTBE;
- s_sendWriteRequest;
- p_popRequestQueue;
- }
-
- transition(BUSY_RD, Data, READY) {
- t_updateTBEData;
- d_dataCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition(BUSY_WR, Ack, READY) {
- a_ackCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
- zz_stallAndWaitRequestQueue;
- }
-
-}
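
The controller above admits exactly one outstanding DMA transaction: a request arriving in BUSY_RD or BUSY_WR is parked, and the completing Data/Ack returns the machine to READY and wakes all waiters. A toy model of that serialization (hypothetical C++, not the DMASequencer API):

#include <deque>
#include <iostream>
#include <string>

enum class DmaState { READY, BUSY_RD, BUSY_WR };

struct DmaCtrl {
    DmaState state = DmaState::READY;
    std::deque<std::string> stalled;     // requests waiting for READY

    void request(const std::string &r) {
        if (state != DmaState::READY) {  // zz_stallAndWaitRequestQueue
            stalled.push_back(r);
            return;
        }
        state = (r == "ReadRequest") ? DmaState::BUSY_RD : DmaState::BUSY_WR;
        std::cout << "issued " << r << " to directory\n";
    }

    void complete() {                    // Data or Ack arrives
        state = DmaState::READY;
        std::deque<std::string> retry;   // wkad_wakeUpAllDependents
        retry.swap(stalled);
        for (const std::string &r : retry)
            request(r);                  // first waiter re-issues, rest re-stall
    }
};

int main() {
    DmaCtrl dma;
    dma.request("ReadRequest");   // READY -> BUSY_RD
    dma.request("WriteRequest");  // stalled behind the read
    dma.complete();               // BUSY_RD -> READY, write re-issues
    dma.complete();
    return 0;
}
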
+++ /dev/null
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- UPGRADE, desc="UPGRADE to exclusive";
- GETS, desc="Get Shared";
- GET_INSTR, desc="Get Instruction";
- INV, desc="INValidate";
- PUTX, desc="Replacement message";
-
- WB_ACK, desc="Writeback ack";
-
- DMA_READ, desc="DMA Read";
- DMA_WRITE, desc="DMA Write";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- MEMORY_ACK, desc="Ack from memory controller";
- DATA, desc="Data block for L1 cache in S state";
- DATA_EXCLUSIVE, desc="Data block for L1 cache in M/E state";
- MEMORY_DATA, desc="Data block from / to main memory";
- ACK, desc="Generic invalidate ack";
- WB_ACK, desc="writeback ack";
- UNBLOCK, desc="unblock";
- EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
- INV, desc="Invalidate from directory";
-}
-
-// RequestMsg
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
-  MachineID Requestor, desc="What component made the request";
- NetDest Destination, desc="What components receive the request, includes MachineType and num";
- MessageSizeType MessageSize, desc="size category of the message";
- DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
- int Len;
- bool Dirty, default="false", desc="Dirty bit";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-
- bool functionalRead(Packet *pkt) {
-    // Only PUTX messages contain the data block
- if (Type == CoherenceRequestType:PUTX) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
-    // No check on the message type is required since a functional write
-    // should update any message that carries a copy of the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-// ResponseMsg
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="What component sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
- DataBlock DataBlk, desc="Data for the cache line";
- bool Dirty, default="false", desc="Dirty bit";
- int AckCount, default="0", desc="number of acks in this message";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
-    // A valid data block is only present in messages of the following types
- if (Type == CoherenceResponseType:DATA ||
- Type == CoherenceResponseType:DATA_EXCLUSIVE ||
- Type == CoherenceResponseType:MEMORY_DATA) {
-
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
-    // No check on the message type is required since a functional write
-    // should update any message that carries a copy of the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
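
Both message types above answer functional accesses through testAndRead/testAndWrite. A rough model of their assumed semantics in plain C++: touch the message's DataBlk only when the packet's byte range falls inside the block, and report whether anything was touched so functionalWrite can count how many in-flight messages were patched. Everything below is a hypothetical sketch, not the gem5 implementation:

#include <cstdint>
#include <cstring>
#include <iostream>

constexpr std::size_t kBlockBytes = 64;

struct DataBlock { std::uint8_t bytes[kBlockBytes] = {}; };
struct Packet { std::uint64_t addr; std::size_t size; std::uint8_t data[8]; };

static std::uint64_t lineAddress(std::uint64_t a) {
    return a & ~std::uint64_t(kBlockBytes - 1);
}

// Returns true iff the packet's range lies inside the block at blockAddr,
// in which case the packet data is copied into the block.
bool testAndWrite(std::uint64_t blockAddr, DataBlock &blk, const Packet &pkt) {
    if (lineAddress(pkt.addr) != blockAddr)
        return false;
    std::size_t off = pkt.addr - blockAddr;
    if (off + pkt.size > kBlockBytes)
        return false;
    std::memcpy(blk.bytes + off, pkt.data, pkt.size);
    return true;
}

int main() {
    DataBlock msgBlk;                      // DataBlk carried in a PUTX message
    Packet pkt{0x1008, 4, {1, 2, 3, 4}};
    int num_functional_writes = 0;
    if (testAndWrite(lineAddress(pkt.addr), msgBlk, pkt))
        ++num_functional_writes;           // mirrors the SLICC accumulation
    std::cout << num_functional_writes << " message(s) patched\n";
    return 0;
}
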
+++ /dev/null
-protocol "MESI_Two_Level";
-include "RubySlicc_interfaces.slicc";
-include "MESI_Two_Level-msg.sm";
-include "MESI_Two_Level-L1cache.sm";
-include "MESI_Two_Level-L2cache.sm";
-include "MESI_Two_Level-dir.sm";
-include "MESI_Two_Level-dma.sm";
+++ /dev/null
-/*
- * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
- * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L1Cache, "MI Example L1 Cache")
- : Sequencer * sequencer;
- CacheMemory * cacheMemory;
- Cycles cache_response_latency := 12;
- Cycles issue_latency := 2;
- bool send_evictions;
-
- // NETWORK BUFFERS
- MessageBuffer * requestFromCache, network="To", virtual_network="2",
- vnet_type="request";
- MessageBuffer * responseFromCache, network="To", virtual_network="4",
- vnet_type="response";
-
- MessageBuffer * forwardToCache, network="From", virtual_network="3",
- vnet_type="forward";
- MessageBuffer * responseToCache, network="From", virtual_network="4",
- vnet_type="response";
-
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states") {
- I, AccessPermission:Invalid, desc="Not Present/Invalid";
- II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
- M, AccessPermission:Read_Write, desc="Modified";
- MI, AccessPermission:Busy, desc="Modified, issued PUT";
- MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";
-
- IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
- IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- // From processor
-
- Load, desc="Load request from processor";
- Ifetch, desc="Ifetch request from processor";
- Store, desc="Store request from processor";
-
- Data, desc="Data from network";
- Fwd_GETX, desc="Forward from network";
-
- Inv, desc="Invalidate request from dir";
-
- Replacement, desc="Replace a block";
- Writeback_Ack, desc="Ack from the directory for a writeback";
- Writeback_Nack, desc="Nack from the directory for a writeback";
- }
-
- // STRUCTURE DEFINITIONS
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="Data in the block";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
-
- // STRUCTURES
- TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
-
- // PROTOTYPES
- Tick clockEdge();
- Cycles ticksToCycles(Tick t);
- void set_cache_entry(AbstractCacheEntry a);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void profileMsgDelay(int virtualNetworkType, Cycles b);
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", cacheMemory.lookup(address));
- }
-
- // FUNCTIONS
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
- return Event:Store;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
-
- if (is_valid(tbe)) {
- return tbe.TBEState;
- }
- else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- else {
- return State:I;
- }
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
-
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- return L1Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return L1Cache_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L1Cache_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- // NETWORK PORTS
-
- out_port(requestNetwork_out, RequestMsg, requestFromCache);
- out_port(responseNetwork_out, ResponseMsg, responseFromCache);
-
- in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
- if (forwardRequestNetwork_in.isReady(clockEdge())) {
- peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
- }
- else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
- trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
- }
- else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
- trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
- }
- else if (in_msg.Type == CoherenceRequestType:INV) {
- trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
- }
- else {
- error("Unexpected message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseToCache) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- }
- else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Mandatory Queue
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- if (is_invalid(cache_entry) &&
-          cacheMemory.cacheAvail(in_msg.LineAddress) == false) {
- // make room for the block
- // Check if the line we want to evict is not locked
- Addr addr := cacheMemory.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
- trigger(Event:Replacement, addr,
- getCacheEntry(addr),
- TBEs[addr]);
- }
- else {
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- cache_entry, TBEs[in_msg.LineAddress]);
- }
- }
- }
- }
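
The mandatory-queue port above encodes the allocate-on-miss policy: when the set is full, the victim named by cacheProbe is evicted first (a Replacement event on the victim's address, guarded by check_on_cache_probe) and the demand request is retried once a way is free. A toy version of that admission check (hypothetical C++, not the CacheMemory API):

#include <cstdint>
#include <iostream>
#include <unordered_map>

struct Cache {
    std::unordered_map<std::uint64_t, int> lines;  // addr -> way (toy model)
    std::size_t capacity = 2;                      // ways in the (single) set

    bool avail() const { return lines.size() < capacity; }
    std::uint64_t probeVictim() const { return lines.begin()->first; }
};

int main() {
    Cache c;
    c.lines[0x000] = 0;
    c.lines[0x040] = 1;

    std::uint64_t demand = 0x080;                  // misses in a full set
    if (!c.lines.count(demand) && !c.avail()) {
        std::uint64_t victim = c.probeVictim();    // cacheProbe(...)
        std::cout << "Replacement on 0x" << std::hex << victim << "\n";
        c.lines.erase(victim);                     // h_deallocateL1CacheBlock
    }
    c.lines[demand] = 1;          // allocate, then handle the Load/Store
    std::cout << "allocated 0x" << std::hex << demand << "\n";
    return 0;
}
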
-
- // ACTIONS
-
- action(a_issueRequest, "a", desc="Issue a request") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Control;
- }
- }
-
- action(b_issuePUT, "b", desc="Issue a PUT request") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Data;
- }
- }
-
- action(e_sendData, "e", desc="Send data from cache to requestor") {
- peek(forwardRequestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
- peek(forwardRequestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
-  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
-    if (is_invalid(cache_entry)) {
-      set_cache_entry(cacheMemory.allocate(address, new Entry));
-    }
-  }
-
- action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
- if (is_valid(cache_entry)) {
- cacheMemory.deallocate(address);
- unset_cache_entry();
- }
- }
-
- action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(n_popResponseQueue, "n", desc="Pop the response queue") {
- Tick delay := responseNetwork_in.dequeue(clockEdge());
- profileMsgDelay(1, ticksToCycles(delay));
- }
-
- action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
- Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
- profileMsgDelay(2, ticksToCycles(delay));
- }
-
- action(p_profileMiss, "pi", desc="Profile cache miss") {
- ++cacheMemory.demand_misses;
- }
-
-  action(p_profileHit, "ph", desc="Profile cache hit") {
- ++cacheMemory.demand_hits;
- }
-
- action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false);
- }
-
- action(rx_load_hit, "rx", desc="External load completed.") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, true,
- machineIDToMachineType(in_msg.Sender));
- }
- }
-
- action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk, false);
- }
-
- action(sx_store_hit, "sx", desc="External store completed.") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk, true,
- machineIDToMachineType(in_msg.Sender));
- }
- }
-
- action(u_writeDataToCache, "u", desc="Write data to the cache") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE") {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
- assert(is_valid(cache_entry));
- assert(is_valid(tbe));
- tbe.DataBlk := cache_entry.DataBlk;
- }
-
- action(z_stall, "z", desc="stall") {
- // do nothing
- }
-
- // TRANSITIONS
-
- transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
- z_stall;
- }
-
- transition({IS, IM}, {Fwd_GETX, Inv}) {
- z_stall;
- }
-
- transition(MI, Inv) {
- o_popForwardedRequestQueue;
- }
-
- transition(M, Store) {
- s_store_hit;
- p_profileHit;
- m_popMandatoryQueue;
- }
-
- transition(M, {Load, Ifetch}) {
- r_load_hit;
- p_profileHit;
- m_popMandatoryQueue;
- }
-
- transition(I, Inv) {
- o_popForwardedRequestQueue;
- }
-
- transition(I, Store, IM) {
- v_allocateTBE;
- i_allocateL1CacheBlock;
- a_issueRequest;
- p_profileMiss;
- m_popMandatoryQueue;
- }
-
- transition(I, {Load, Ifetch}, IS) {
- v_allocateTBE;
- i_allocateL1CacheBlock;
- a_issueRequest;
- p_profileMiss;
- m_popMandatoryQueue;
- }
-
- transition(IS, Data, M) {
- u_writeDataToCache;
- rx_load_hit;
- w_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition(IM, Data, M) {
- u_writeDataToCache;
- sx_store_hit;
- w_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition(M, Fwd_GETX, I) {
- e_sendData;
- forward_eviction_to_cpu;
- o_popForwardedRequestQueue;
- }
-
- transition(I, Replacement) {
- h_deallocateL1CacheBlock;
- }
-
- transition(M, {Replacement,Inv}, MI) {
- v_allocateTBE;
- b_issuePUT;
- x_copyDataFromCacheToTBE;
- forward_eviction_to_cpu;
- h_deallocateL1CacheBlock;
- }
-
- transition(MI, Writeback_Ack, I) {
- w_deallocateTBE;
- o_popForwardedRequestQueue;
- }
-
- transition(MI, Fwd_GETX, II) {
- ee_sendDataFromTBE;
- o_popForwardedRequestQueue;
- }
-
- transition(MI, Writeback_Nack, MII) {
- o_popForwardedRequestQueue;
- }
-
- transition(MII, Fwd_GETX, I) {
- ee_sendDataFromTBE;
- w_deallocateTBE;
- o_popForwardedRequestQueue;
- }
-
- transition(II, Writeback_Nack, I) {
- w_deallocateTBE;
- o_popForwardedRequestQueue;
- }
-}
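
The MI/II/MII states above resolve the race between an in-flight PUTX and a directory that has already forwarded ownership: once the block leaves the cache, a forwarded GETX must be answered from the TBE copy, and the directory's WB_ACK/WB_NACK decides which transient state finally drains to I. A toy replay of one interleaving (hypothetical C++; states and events named as in the machine):

#include <iostream>
#include <map>
#include <string>
#include <utility>

int main() {
    std::map<std::pair<std::string, std::string>, std::string> next = {
        {{"M",   "Replacement"},    "MI"},  // PUTX issued, data copied to TBE
        {{"MI",  "Fwd_GETX"},       "II"},  // serve the requestor from the TBE
        {{"MI",  "Writeback_Ack"},  "I"},   // clean handoff, no race
        {{"MI",  "Writeback_Nack"}, "MII"}, // the directory saw a GETX first
        {{"MII", "Fwd_GETX"},       "I"},   // TBE data sent, done
        {{"II",  "Writeback_Nack"}, "I"},   // PUTX rejected after data left
    };

    std::string s = "M";
    for (std::string ev : {"Replacement", "Fwd_GETX", "Writeback_Nack"}) {
        std::string n = next.at({s, ev});
        std::cout << s << " --" << ev << "--> " << n << "\n";
        s = n;
    }
    return 0;
}
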
+++ /dev/null
-/*
- * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
- * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:Directory, "Directory protocol")
- : DirectoryMemory * directory;
- Cycles directory_latency := 12;
- Cycles to_memory_controller_latency := 1;
-
- MessageBuffer * forwardFromDir, network="To", virtual_network="3",
- vnet_type="forward";
- MessageBuffer * responseFromDir, network="To", virtual_network="4",
- vnet_type="response";
- MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
- vnet_type="response";
-
- MessageBuffer * requestToDir, network="From", virtual_network="2",
- vnet_type="request";
- MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
- vnet_type="request";
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
-    I, AccessPermission:Read_Write, desc="Invalid in all caches; memory is up-to-date, so the directory services accesses directly";
-    M, AccessPermission:Invalid, desc="Modified in exactly one cache; the memory copy may be stale";
-
- M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
- M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";
-
- M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
- M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";
-
- IM, AccessPermission:Busy, desc="Intermediate state I-->M";
- MI, AccessPermission:Busy, desc="Intermediate state M-->I";
- ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
- ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- // processor requests
- GETX, desc="A GETX arrives";
- GETS, desc="A GETS arrives";
- PUTX, desc="A PUTX arrives";
-    PUTX_NotOwner, desc="A PUTX arrives from a node that is not the current owner";
-
- // DMA requests
- DMA_READ, desc="A DMA Read memory request";
- DMA_WRITE, desc="A DMA Write memory request";
-
- // Memory Controller
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- NetDest Sharers, desc="Sharers for this block";
- NetDest Owner, desc="Owner of this block";
- }
-
- // TBE entries for DMA requests
- structure(TBE, desc="TBE entries for outstanding DMA requests") {
- Addr PhysicalAddress, desc="physical address";
- State TBEState, desc="Transient State";
- DataBlock DataBlk, desc="Data to be written (DMA write only)";
-    int Len, desc="Length of the DMA request";
- MachineID DmaRequestor, desc="DMA requestor";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- // ** OBJECTS **
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Cycles ticksToCycles(Tick t);
- Tick cyclesToTicks(Cycles c);
- void set_tbe(TBE b);
- void unset_tbe();
-
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
-
- if (is_valid(dir_entry)) {
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (directory.isPresent(addr)) {
- return getDirectoryEntry(addr).DirectoryState;
- } else {
- return State:I;
- }
- }
-
- void setState(TBE tbe, Addr addr, State state) {
-
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (directory.isPresent(addr)) {
-
- if (state == State:M) {
- assert(getDirectoryEntry(addr).Owner.count() == 1);
- assert(getDirectoryEntry(addr).Sharers.count() == 0);
- }
-
- getDirectoryEntry(addr).DirectoryState := state;
-
- if (state == State:I) {
- assert(getDirectoryEntry(addr).Owner.count() == 0);
- assert(getDirectoryEntry(addr).Sharers.count() == 0);
- }
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- return Directory_State_to_permission(tbe.TBEState);
- }
-
- if(directory.isPresent(addr)) {
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- if (directory.isPresent(addr)) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- // ** OUT_PORTS **
- out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
- out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
-
- // ** IN_PORTS **
- in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- TBE tbe := TBEs[in_msg.LineAddress];
- if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
- } else if (in_msg.Type == DMARequestType:WRITE) {
- trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady(clockEdge())) {
- peek(requestQueue_in, RequestMsg) {
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, tbe);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, tbe);
- } else if (in_msg.Type == CoherenceRequestType:PUTX) {
- if (getDirectoryEntry(in_msg.addr).Owner.isElement(in_msg.Requestor)) {
- trigger(Event:PUTX, in_msg.addr, tbe);
- } else {
- trigger(Event:PUTX_NotOwner, in_msg.addr, tbe);
- }
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
-//added by SS
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, tbe);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, tbe);
- } else {
- DPRINTF(RubySlicc,"%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
- action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WB_ACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(forwardNetwork_out, RequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WB_ACK;
- out_msg.Requestor := in_msg.OriginalRequestorMachId;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WB_NACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(c_clearOwner, "c", desc="Clear the owner field") {
- getDirectoryEntry(address).Owner.clear();
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:DATA;
- // we send the entire data block and rely on the dma controller
- // to split it up if need be
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:DATA;
-
- // we send the entire data block and rely on the dma controller
- // to split it up if need be
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:ACK;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
- peek(requestQueue_in, RequestMsg) {
- getDirectoryEntry(address).Owner.clear();
- getDirectoryEntry(address).Owner.add(in_msg.Requestor);
- }
- }
-
- action(f_forwardRequest, "f", desc="Forward request to owner") {
- peek(requestQueue_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT("Own: ");
- APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.addr).Owner);
- APPEND_TRANSITION_COMMENT("Req: ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := getDirectoryEntry(in_msg.addr).Owner;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue(clockEdge());
- }
-
- action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DataBlk := in_msg.DataBlk;
- tbe.PhysicalAddress := in_msg.PhysicalAddress;
- tbe.Len := in_msg.Len;
- tbe.DmaRequestor := in_msg.Requestor;
- }
- }
-
- action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DmaRequestor := in_msg.Requestor;
- }
- }
-
- action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
- peek(requestQueue_in, RequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(z_recycleRequestQueue, "z", desc="recycle request queue") {
- requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
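- // Off-chip accesses go through the queueMemoryRead/Write helpers with
- // to_memory_controller_latency; their completions come back on memQueue_in
- // as Memory_Data (reads) or Memory_Ack (writebacks).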
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestQueue_in, RequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
- action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
- action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- queueMemoryWritePartial(in_msg.Requestor, address,
- to_memory_controller_latency, in_msg.DataBlk,
- in_msg.Len);
- }
- }
-
- action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
- peek(requestQueue_in, RequestMsg) {
- queueMemoryWritePartial(in_msg.Requestor, address,
- to_memory_controller_latency, tbe.DataBlk,
- tbe.Len);
- }
- }
-
- action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
- peek(requestQueue_in, RequestMsg) {
- queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- // TRANSITIONS
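- // The stable states are I and M. Every transient state simply recycles
- // conflicting CPU or DMA requests until the outstanding memory access
- // completes.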
- transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
- z_recycleRequestQueue;
- }
-
- transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
- z_recycleRequestQueue;
- }
-
- transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
- y_recycleDMARequestQueue;
- }
-
- transition(I, GETX, IM) {
- //d_sendData;
- v_allocateTBEFromRequestNet;
- qf_queueMemoryFetchRequest;
- e_ownerIsRequestor;
- i_popIncomingRequestQueue;
- }
-
- transition(IM, Memory_Data, M) {
- d_sendData;
- //e_ownerIsRequestor;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(I, DMA_READ, ID) {
- //dr_sendDMAData;
- r_allocateTbeForDmaRead;
- qf_queueMemoryFetchRequestDMA;
- p_popIncomingDMARequestQueue;
- }
-
- transition(ID, Memory_Data, I) {
- dr_sendDMAData;
- //p_popIncomingDMARequestQueue;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(I, DMA_WRITE, ID_W) {
- v_allocateTBE;
- qw_queueMemoryWBRequest_partial;
- p_popIncomingDMARequestQueue;
- }
-
- transition(ID_W, Memory_Ack, I) {
- da_sendDMAAck;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(M, DMA_READ, M_DRD) {
- v_allocateTBE;
- inv_sendCacheInvalidate;
- p_popIncomingDMARequestQueue;
- }
-
- transition(M_DRD, PUTX, M_DRDI) {
- drp_sendDMAData;
- c_clearOwner;
- l_queueMemoryWBRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(M_DRDI, Memory_Ack, I) {
- l_sendWriteBackAck;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(M, DMA_WRITE, M_DWR) {
- v_allocateTBE;
- inv_sendCacheInvalidate;
- p_popIncomingDMARequestQueue;
- }
-
- transition(M_DWR, PUTX, M_DWRI) {
- qw_queueMemoryWBRequest_partialTBE;
- c_clearOwner;
- i_popIncomingRequestQueue;
- }
-
- transition(M_DWRI, Memory_Ack, I) {
- l_sendWriteBackAck;
- da_sendDMAAck;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(M, GETX, M) {
- f_forwardRequest;
- e_ownerIsRequestor;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTX, MI) {
- c_clearOwner;
- v_allocateTBEFromRequestNet;
- l_queueMemoryWBRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(MI, Memory_Ack, I) {
- l_sendWriteBackAck;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(M, PUTX_NotOwner, M) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition(I, PUTX_NotOwner, I) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
- * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:DMA, "DMA Controller")
- : DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-
- MessageBuffer * responseFromDir, network="From", virtual_network="1",
- vnet_type="response";
- MessageBuffer * requestToDir, network="To", virtual_network="0",
- vnet_type="request";
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="DMA states", default="DMA_State_READY") {
- READY, AccessPermission:Invalid, desc="Ready to accept a new request";
- BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
- BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
- }
-
- enumeration(Event, desc="DMA events") {
- ReadRequest, desc="A new read request";
- WriteRequest, desc="A new write request";
- Data, desc="Data from a DMA memory read";
- Ack, desc="DMA write to memory completed";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Data";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
-
- TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else {
- return State:READY;
- }
- }
-
- void setState(TBE tbe, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("DMA does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("DMA does not support functional write.");
- }
-
- out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
-
- in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, SequencerMsg) {
- if (in_msg.Type == SequencerRequestType:LD ) {
- trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == SequencerRequestType:ST) {
- trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid request type");
- }
- }
- }
- }
-
- in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady(clockEdge())) {
- peek(dmaResponseQueue_in, DMAResponseMsg) {
- if (in_msg.Type == DMAResponseType:ACK) {
- trigger(Event:Ack, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == DMAResponseType:DATA) {
- trigger(Event:Data, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid response type");
- }
- }
- }
- }
-
- action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(requestToDir_out, DMARequestMsg, request_latency) {
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.LineAddress := in_msg.LineAddress;
- out_msg.Type := DMARequestType:READ;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(requestToDir_out, DMARequestMsg, request_latency) {
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.LineAddress := in_msg.LineAddress;
- out_msg.Type := DMARequestType:WRITE;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
- dma_sequencer.ackCallback(address);
- }
-
- action(d_dataCallback, "d", desc="Write data to dma sequencer") {
- dma_sequencer.dataCallback(tbe.DataBlk, address);
- }
-
- action(t_updateTBEData, "t", desc="Update TBE Data") {
- assert(is_valid(tbe));
- peek(dmaResponseQueue_in, DMAResponseMsg) {
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE entry") {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
- action(p_popResponseQueue, "\p", desc="Pop response queue") {
- dmaResponseQueue_in.dequeue(clockEdge());
- }
-
- action(zz_stallAndWaitRequestQueue, "zz", desc="Stall and wait the request queue") {
- stall_and_wait(dmaRequestQueue_in, address);
- }
-
- action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
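- // Requests that arrive for a line that is already BUSY are stalled with
- // stall_and_wait and re-woken (wakeUpAllBuffers) once the in-flight read
- // or write completes.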
- transition(READY, ReadRequest, BUSY_RD) {
- v_allocateTBE;
- s_sendReadRequest;
- p_popRequestQueue;
- }
-
- transition(READY, WriteRequest, BUSY_WR) {
- v_allocateTBE;
- s_sendWriteRequest;
- p_popRequestQueue;
- }
-
- transition(BUSY_RD, Data, READY) {
- t_updateTBEData;
- d_dataCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition(BUSY_WR, Ack, READY) {
- a_ackCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition({BUSY_RD, BUSY_WR}, {ReadRequest, WriteRequest}) {
- zz_stallAndWaitRequestQueue;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- GETS, desc="Get Shared";
- PUTX, desc="Put eXclusive";
- WB_ACK, desc="Writeback ack";
- WB_NACK, desc="Writeback neg. ack";
- INV, desc="Invalidation";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- ACK, desc="ACKnowledgment, responder doesn't have a copy";
- DATA, desc="Data";
- DATA_EXCLUSIVE_CLEAN, desc="Data, no other processor has a copy, data is clean";
- DATA_EXCLUSIVE_DIRTY, desc="Data, no other processor has a copy, data is dirty";
- UNBLOCK, desc="Unblock";
- UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
- WRITEBACK_CLEAN, desc="Clean writeback (no data)";
- WRITEBACK_DIRTY, desc="Dirty writeback (contains data)";
- WRITEBACK, desc="Generic writeback (contains data)";
-}
-
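- // Every message type implements functionalRead/functionalWrite so that a
- // functional access can find and update a cache line even while it is in
- // flight in the network.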
-// RequestMsg (and also forwarded requests)
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Multicast destination mask";
- DataBlock DataBlk, desc="data for the cache line";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- // Valid data block is only present in PUTX messages
- if (Type == CoherenceRequestType:PUTX) {
- return testAndRead(addr, DataBlk, pkt);
- }
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should read
- // data block from only those messages that contain valid data
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-// ResponseMsg (and also unblock requests)
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="Node who sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
- DataBlock DataBlk, desc="data for the cache line";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- // A check on message type should appear here so that only those
- // messages that actually contain data are read.
- return testAndRead(addr, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should read
- // data block from only those messages that contain valid data
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
- READ, desc="Memory Read";
- WRITE, desc="Memory Write";
- NULL, desc="Invalid";
-}
-
-enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
- DATA, desc="DATA read";
- ACK, desc="ACK write";
- NULL, desc="Invalid";
-}
-
-structure(DMARequestMsg, desc="...", interface="Message") {
- DMARequestType Type, desc="Request type (read/write)";
- Addr PhysicalAddress, desc="Physical address for this request";
- Addr LineAddress, desc="Line address for this request";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Destination";
- DataBlock DataBlk, desc="DataBlk attached to this request";
- int Len, desc="The length of the request";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- return testAndRead(LineAddress, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(LineAddress, DataBlk, pkt);
- }
-}
-
-structure(DMAResponseMsg, desc="...", interface="Message") {
- DMAResponseType Type, desc="Response type (DATA/ACK)";
- Addr PhysicalAddress, desc="Physical address for this request";
- Addr LineAddress, desc="Line address for this request";
- NetDest Destination, desc="Destination";
- DataBlock DataBlk, desc="DataBlk attached to this request";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- return testAndRead(LineAddress, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(LineAddress, DataBlk, pkt);
- }
-}
+++ /dev/null
-protocol "MI_example";
-include "RubySlicc_interfaces.slicc";
-include "MI_example-msg.sm";
-include "MI_example-cache.sm";
-include "MI_example-dir.sm";
-include "MI_example-dma.sm";
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:CorePair, "CP-like Core Coherence")
- : Sequencer * sequencer;
- Sequencer * sequencer1;
- CacheMemory * L1Icache;
- CacheMemory * L1D0cache;
- CacheMemory * L1D1cache;
- CacheMemory * L2cache; // functional memory accesses look in this CacheMemory
- bool send_evictions := "False";
- Cycles issue_latency := 5; // time to send data down to NB
- Cycles l2_hit_latency := 18;
-
- // BEGIN Core Buffers
-
- // To the Network
- MessageBuffer * requestFromCore, network="To", virtual_network="0", vnet_type="request";
- MessageBuffer * responseFromCore, network="To", virtual_network="2", vnet_type="response";
- MessageBuffer * unblockFromCore, network="To", virtual_network="4", vnet_type="unblock";
-
- // From the Network
- MessageBuffer * probeToCore, network="From", virtual_network="0", vnet_type="request";
- MessageBuffer * responseToCore, network="From", virtual_network="2", vnet_type="response";
-
- MessageBuffer * mandatoryQueue;
-
- MessageBuffer * triggerQueue, ordered="true";
-
- // END Core Buffers
-
-{
- // BEGIN STATES
- state_declaration(State, desc="Cache states", default="CorePair_State_I") {
-
- // Base States
- I, AccessPermission:Invalid, desc="Invalid";
- S, AccessPermission:Read_Only, desc="Shared";
- E0, AccessPermission:Read_Write, desc="Exclusive with Cluster 0 ownership";
- E1, AccessPermission:Read_Write, desc="Exclusive with Cluster 1 ownership";
- Es, AccessPermission:Read_Write, desc="Exclusive in core";
- O, AccessPermission:Read_Only, desc="Owner state in core, both clusters and other cores may be sharing line";
- Ms, AccessPermission:Read_Write, desc="Modified in core, both clusters may be sharing line";
- M0, AccessPermission:Read_Write, desc="Modified with cluster ownership";
- M1, AccessPermission:Read_Write, desc="Modified with cluster ownership";
-
- // Transient States
- I_M0, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
- I_M1, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
- I_M0M1, AccessPermission:Busy, desc="Was in I_M0, got a store request from other cluster as well";
- I_M1M0, AccessPermission:Busy, desc="Was in I_M1, got a store request from other cluster as well";
- I_M0Ms, AccessPermission:Busy, desc="Was in I_M0, got a load request from other cluster as well";
- I_M1Ms, AccessPermission:Busy, desc="Was in I_M1, got a load request from other cluster as well";
- I_E0S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
- I_E1S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
- I_ES, AccessPermission:Busy, desc="S_F got hit by invalidating probe, RdBlk response needs to go to both clusters";
-
- IF_E0S, AccessPermission:Busy, desc="something got hit with Probe Invalidate, now just I_E0S but expecting a L2_to_L1D0 trigger, just drop when receive";
- IF_E1S, AccessPermission:Busy, desc="something got hit with Probe Invalidate, now just I_E1S but expecting a L2_to_L1D1 trigger, just drop when receive";
- IF_ES, AccessPermission:Busy, desc="same, but waiting for two fills";
- IF0_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
- IF1_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
- F_S0, AccessPermission:Busy, desc="same, but going to S0 when trigger received";
- F_S1, AccessPermission:Busy, desc="same, but going to S1 when trigger received";
-
- ES_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for clean writeback ack";
- MO_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for dirty writeback ack";
- MO_S0, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
- MO_S1, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
- S_F0, AccessPermission:Read_Only, desc="Shared, filling L1";
- S_F1, AccessPermission:Read_Only, desc="Shared, filling L1";
- S_F, AccessPermission:Read_Only, desc="Shared, filling L1";
- O_F0, AccessPermission:Read_Only, desc="Owned, filling L1";
- O_F1, AccessPermission:Read_Only, desc="Owned, filling L1";
- O_F, AccessPermission:Read_Only, desc="Owned, filling L1";
- Si_F0, AccessPermission:Read_Only, desc="Shared, filling icache";
- Si_F1, AccessPermission:Read_Only, desc="Shared, filling icache";
- S_M0, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- S_M1, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- O_M0, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- O_M1, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- S0, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 0, waiting for response";
- S1, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 1, waiting for response";
-
- Es_F0, AccessPermission:Read_Write, desc="Es, Cluster read, filling";
- Es_F1, AccessPermission:Read_Write, desc="Es, Cluster read, filling";
- Es_F, AccessPermission:Read_Write, desc="Es, other cluster read, filling";
- E0_F, AccessPermission:Read_Write, desc="E0, cluster read, filling";
- E1_F, AccessPermission:Read_Write, desc="E1, cluster read, filling";
- E0_Es, AccessPermission:Read_Write, desc="E0, other cluster read, filling, going to Es";
- E1_Es, AccessPermission:Read_Write, desc="E1, other cluster read, filling, going to Es";
- Ms_F0, AccessPermission:Read_Write, desc="Ms, cluster 0 read, filling";
- Ms_F1, AccessPermission:Read_Write, desc="Ms, cluster 1 read, filling";
- Ms_F, AccessPermission:Read_Write, desc="Ms, other cluster read, filling";
- M0_F, AccessPermission:Read_Write, desc="M0, cluster read, filling";
- M0_Ms, AccessPermission:Read_Write, desc="M0, other cluster read, filling, going to Ms";
- M1_F, AccessPermission:Read_Write, desc="M1, cluster read, filling";
- M1_Ms, AccessPermission:Read_Write, desc="M1, other cluster read, filling, going to Ms";
-
- I_C, AccessPermission:Invalid, desc="Invalid, but waiting for WBAck from NB from canceled writeback";
- S0_C, AccessPermission:Busy, desc="MO_S0 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
- S1_C, AccessPermission:Busy, desc="MO_S1 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
- S_C, AccessPermission:Busy, desc="S*_C got NB_AckS, still waiting for WBAck";
-
- } // END STATES
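-
- // Transient-state naming: <base>_F0 and <base>_F1 are fills into cluster
- // 0's or cluster 1's L1, <base>_F fills both clusters, and the _M0/_M1
- // states are waiting for modified permission (RdBlkM/CtoD) on behalf of
- // the given cluster.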
-
- // BEGIN EVENTS
- enumeration(Event, desc="CP Events") {
- // CP Initiated events
- C0_Load_L1miss, desc="Cluster 0 load, L1 missed";
- C0_Load_L1hit, desc="Cluster 0 load, L1 hit";
- C1_Load_L1miss, desc="Cluster 1 load, L1 missed";
- C1_Load_L1hit, desc="Cluster 1 load, L1 hit";
- Ifetch0_L1hit, desc="Instruction fetch, hit in the L1";
- Ifetch1_L1hit, desc="Instruction fetch, hit in the L1";
- Ifetch0_L1miss, desc="Instruction fetch, missed in the L1";
- Ifetch1_L1miss, desc="Instruction fetch, missed in the L1";
- C0_Store_L1miss, desc="Cluster 0 store missed in L1";
- C0_Store_L1hit, desc="Cluster 0 store hit in L1";
- C1_Store_L1miss, desc="Cluster 1 store missed in L1";
- C1_Store_L1hit, desc="Cluster 1 store hit in L1";
- // NB Initiated events
- NB_AckS, desc="NB Ack to Core Request, line granted Shared";
- NB_AckM, desc="NB Ack to Core Request, line granted Modified";
- NB_AckE, desc="NB Ack to Core Request, line granted Exclusive";
-
- NB_AckWB, desc="NB Ack for writeback";
-
- // Memory System initiated events
- L1I_Repl, desc="Replace address from L1I"; // Presumed clean
- L1D0_Repl, desc="Replace address from L1D0"; // Presumed clean
- L1D1_Repl, desc="Replace address from L1D1"; // Presumed clean
- L2_Repl, desc="Replace address from L2";
-
- L2_to_L1D0, desc="L1 fill from L2";
- L2_to_L1D1, desc="L1 fill from L2";
- L2_to_L1I, desc="L1 fill from L2";
-
- // Probe Events
- PrbInvData, desc="probe, return O or M data";
- PrbInv, desc="probe, no need for data";
- PrbShrData, desc="probe downgrade, return O or M data";
-
- } // END EVENTS
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- L1D0DataArrayRead, desc="Read the data array";
- L1D0DataArrayWrite, desc="Write the data array";
- L1D0TagArrayRead, desc="Read the tag array";
- L1D0TagArrayWrite, desc="Write the tag array";
- L1D1DataArrayRead, desc="Read the data array";
- L1D1DataArrayWrite, desc="Write the data array";
- L1D1TagArrayRead, desc="Read the tag array";
- L1D1TagArrayWrite, desc="Write the tag array";
- L1IDataArrayRead, desc="Read the data array";
- L1IDataArrayWrite, desc="Write the data array";
- L1ITagArrayRead, desc="Read the tag array";
- L1ITagArrayWrite, desc="Write the tag array";
- L2DataArrayRead, desc="Read the data array";
- L2DataArrayWrite, desc="Write the data array";
- L2TagArrayRead, desc="Read the tag array";
- L2TagArrayWrite, desc="Write the tag array";
- }
-
- // BEGIN STRUCTURE DEFINITIONS
-
- // Cache Entry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Shared, desc="Victim hit by shared probe";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<CorePair_TBE>", constructor="m_number_of_TBEs";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // END STRUCTURE DEFINITIONS
-
- // BEGIN INTERNAL FUNCTIONS
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- bool addressInCore(Addr addr) {
- return (L2cache.isTagPresent(addr) || L1Icache.isTagPresent(addr) || L1D0cache.isTagPresent(addr) || L1D1cache.isTagPresent(addr));
- }
-
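- // The L2 entry is the copy that functional accesses rely on: the L1s
- // write through to the L2, so getCacheEntry only consults L2cache.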
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
- return L2cache_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- Entry getL1CacheEntry(Addr addr, int cluster), return_by_pointer="yes" {
- if (cluster == 0) {
- Entry L1D0_entry := static_cast(Entry, "pointer", L1D0cache.lookup(addr));
- return L1D0_entry;
- } else {
- Entry L1D1_entry := static_cast(Entry, "pointer", L1D1cache.lookup(addr));
- return L1D1_entry;
- }
- }
-
- Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry c_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
- return c_entry;
- }
-
- bool presentOrAvail2(Addr addr) {
- return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
- }
-
- bool presentOrAvailI(Addr addr) {
- return L1Icache.isTagPresent(addr) || L1Icache.cacheAvail(addr);
- }
-
- bool presentOrAvailD0(Addr addr) {
- return L1D0cache.isTagPresent(addr) || L1D0cache.cacheAvail(addr);
- }
-
- bool presentOrAvailD1(Addr addr) {
- return L1D1cache.isTagPresent(addr) || L1D1cache.cacheAvail(addr);
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return CorePair_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return CorePair_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(CorePair_State_to_permission(state));
- }
- }
-
- MachineType testAndClearLocalHit(Entry cache_entry) {
- assert(is_valid(cache_entry));
- if (cache_entry.FromL2) {
- cache_entry.FromL2 := false;
- return MachineType:L2Cache;
- } else {
- return MachineType:L1Cache;
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L1D0DataArrayRead) {
- L1D0cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L1D0DataArrayWrite) {
- L1D0cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L1D0TagArrayRead) {
- L1D0cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L1D0TagArrayWrite) {
- L1D0cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:L1D1DataArrayRead) {
- L1D1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L1D1DataArrayWrite) {
- L1D1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L1D1TagArrayRead) {
- L1D1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L1D1TagArrayWrite) {
- L1D1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:L1IDataArrayRead) {
- L1Icache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L1IDataArrayWrite) {
- L1Icache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L1ITagArrayRead) {
- L1Icache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L1ITagArrayWrite) {
- L1Icache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:L2DataArrayRead) {
- L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L2DataArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L2TagArrayRead) {
- L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L2TagArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L2DataArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L2DataArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L2TagArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L2TagArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D0DataArrayRead) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D0DataArrayWrite) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D0TagArrayRead) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D0TagArrayWrite) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D1DataArrayRead) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D1DataArrayWrite) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D1TagArrayRead) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D1TagArrayWrite) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1IDataArrayRead) {
- return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1IDataArrayWrite) {
- return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1ITagArrayRead) {
- return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1ITagArrayWrite) {
- return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
-
- } else {
- return true;
- }
- }
-
- // END INTERNAL FUNCTIONS
-
- // ** OUT_PORTS **
-
- out_port(requestNetwork_out, CPURequestMsg, requestFromCore);
- out_port(responseNetwork_out, ResponseMsg, responseFromCore);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
-
- // ** IN_PORTS **
-
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, block_on="addr") {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == TriggerType:L2_to_L1) {
- if (in_msg.Dest == CacheId:L1I) {
- trigger(Event:L2_to_L1I, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Dest == CacheId:L1D0) {
- trigger(Event:L2_to_L1D0, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Dest == CacheId:L1D1) {
- trigger(Event:L2_to_L1D1, in_msg.addr, cache_entry, tbe);
- } else {
- error("unexpected trigger dest");
- }
- }
- }
- }
- }
-
- in_port(probeNetwork_in, NBProbeRequestMsg, probeToCore) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg, block_on="addr") {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- assert(in_msg.ReturnData);
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- // ResponseNetwork
- in_port(responseToCore_in, ResponseMsg, responseToCore) {
- if (responseToCore_in.isReady(clockEdge())) {
- peek(responseToCore_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == CoherenceResponseType:NBSysResp) {
- if (in_msg.State == CoherenceState:Modified) {
- trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.State == CoherenceState:Shared) {
- trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.State == CoherenceState:Exclusive) {
- trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- // Nothing from the Unblock Network
-
- // Mandatory Queue
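- // Core requests are demultiplexed here: accesses are steered to cluster 0
- // or cluster 1 by the parity of contextId, and a miss that cannot allocate
- // in the L1 or the L2 first triggers a replacement (L1*_Repl / L2_Repl)
- // of the selected victim.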
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
-
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // FETCH ACCESS
-
- if (L1Icache.isTagPresent(in_msg.LineAddress)) {
- if (mod(in_msg.contextId, 2) == 0) {
- trigger(Event:Ifetch0_L1hit, in_msg.LineAddress, cache_entry, tbe);
- } else {
- trigger(Event:Ifetch1_L1hit, in_msg.LineAddress, cache_entry, tbe);
- }
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- if (presentOrAvailI(in_msg.LineAddress)) {
- if (mod(in_msg.contextId, 2) == 0) {
- trigger(Event:Ifetch0_L1miss, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- trigger(Event:Ifetch1_L1miss, in_msg.LineAddress, cache_entry,
- tbe);
- }
- } else {
- // Check that the line we want to evict is not locked
- Addr victim := L1Icache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, victim);
- trigger(Event:L1I_Repl, victim,
- getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else { // Not present or avail in L2
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- } else {
- // DATA ACCESS
- if (mod(in_msg.contextId, 2) == 1) {
- if (L1D1cache.isTagPresent(in_msg.LineAddress)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C1_Load_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- // Stores must write through, make sure L2 avail.
- if (presentOrAvail2(in_msg.LineAddress)) {
- trigger(Event:C1_Store_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- if (presentOrAvailD1(in_msg.LineAddress)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C1_Load_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- } else {
- trigger(Event:C1_Store_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- }
- } else {
- // Check that the line we want to evict is not locked
- Addr victim := L1D1cache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, victim);
- trigger(Event:L1D1_Repl, victim,
- getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else { // not present or avail in L2
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- } else {
- Entry L1D0cache_entry := getL1CacheEntry(in_msg.LineAddress, 0);
- if (is_valid(L1D0cache_entry)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C0_Load_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- trigger(Event:C0_Store_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- if (presentOrAvailD0(in_msg.LineAddress)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C0_Load_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- } else {
- trigger(Event:C0_Store_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- }
- } else {
- // Check that the line we want to evict is not locked
- Addr victim := L1D0cache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, victim);
- trigger(Event:L1D0_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- }
- }
- }
- }
- }
-
- // ACTIONS
- action(ii_invIcache, "ii", desc="invalidate iCache") {
- if (L1Icache.isTagPresent(address)) {
- L1Icache.deallocate(address);
- }
- }
-
- action(i0_invCluster, "i0", desc="invalidate cluster 0") {
- if (L1D0cache.isTagPresent(address)) {
- L1D0cache.deallocate(address);
- }
- }
-
- action(i1_invCluster, "i1", desc="invalidate cluster 1") {
- if (L1D1cache.isTagPresent(address)) {
- L1D1cache.deallocate(address);
- }
- }
-
- action(ib_invBothClusters, "ib", desc="invalidate both clusters") {
- if (L1D0cache.isTagPresent(address)) {
- L1D0cache.deallocate(address);
- }
- if (L1D1cache.isTagPresent(address)) {
- L1D1cache.deallocate(address);
- }
- }
-
- action(i2_invL2, "i2", desc="invalidate L2") {
- if(is_valid(cache_entry)) {
- L2cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(mru_setMRU, "mru", desc="Update LRU state") {
- L2cache.setMRU(address);
- }
-
- action(mruD1_setD1cacheMRU, "mruD1", desc="Update LRU state") {
- L1D1cache.setMRU(address);
- }
-
- action(mruD0_setD0cacheMRU, "mruD0", desc="Update LRU state") {
- L1D0cache.setMRU(address);
- }
-
- action(mruI_setIcacheMRU, "mruI", desc="Update LRU state") {
- L1Icache.setMRU(address);
- }
-
- action(n_issueRdBlk, "n", desc="Issue RdBlk") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- DPRINTF(RubySlicc,"%s\n",out_msg.Destination);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkM;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(vd_victim, "vd", desc="Victimize M/O L2 Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- assert(is_valid(cache_entry));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicDirty;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:O) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- }
- }
-
- action(vc_victim, "vc", desc="Victimize E/S L2 Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicClean;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:S) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- }
- }
-
- action(a0_allocateL1D, "a0", desc="Allocate L1D0 Block") {
- if (L1D0cache.isTagPresent(address) == false) {
- L1D0cache.allocateVoid(address, new Entry);
- }
- }
-
- action(a1_allocateL1D, "a1", desc="Allocate L1D1 Block") {
- if (L1D1cache.isTagPresent(address) == false) {
- L1D1cache.allocateVoid(address, new Entry);
- }
- }
-
- action(ai_allocateL1I, "ai", desc="Allocate L1I Block") {
- if (L1Icache.isTagPresent(address) == false) {
- L1Icache.allocateVoid(address, new Entry);
- }
- }
-
- action(a2_allocateL2, "a2", desc="Allocate L2 Block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
- tbe.Dirty := cache_entry.Dirty;
- tbe.Shared := false;
- }
-
- action(d_deallocateTBE, "d", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
- responseToCore_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="Pop Trigger Queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(il0_loadDone, "il0", desc="Cluster 0 i load done") {
- Entry entry := getICacheEntry(address);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(il1_loadDone, "il1", desc="Cluster 1 i load done") {
- Entry entry := getICacheEntry(address);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(l0_loadDone, "l0", desc="Cluster 0 load done") {
- Entry entry := getL1CacheEntry(address, 0);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(l1_loadDone, "l1", desc="Cluster 1 load done") {
- Entry entry := getL1CacheEntry(address, 1);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
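- // The x*-prefixed callbacks complete misses: they peek the response from
- // the directory or L3 and report the true source machine plus the request
- // timestamps, whereas the l*/il* variants above complete local hits out
- // of the L2.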
- action(xl0_loadDone, "xl0", desc="Cluster 0 load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- DPRINTF(ProtocolTrace, "CP Load Done 0 -- address %s, data: %s\n", address, l2entry.DataBlk);
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(xl1_loadDone, "xl1", desc="Cluster 1 load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(xi0_loadDone, "xi0", desc="Cluster 0 i-load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(xi1_loadDone, "xi1", desc="Cluster 1 i-load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(s0_storeDone, "s0", desc="Cluster 0 store done") {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- sequencer.writeCallback(address,
- cache_entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- cache_entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- entry.Dirty := true;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
-
- action(s1_storeDone, "s1", desc="Cluster 1 store done") {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- sequencer1.writeCallback(address,
- cache_entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- cache_entry.Dirty := true;
- entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
-
- action(xs0_storeDone, "xs0", desc="Cluster 0 store done") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- sequencer.writeCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- cache_entry.Dirty := true;
- entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
- }
-
- action(xs1_storeDone, "xs1", desc="Cluster 1 store done") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- sequencer1.writeCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- cache_entry.Dirty := true;
- entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
- }
-
- action(forward_eviction_to_cpu0, "fec0", desc="sends eviction information to processor0") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(forward_eviction_to_cpu1, "fec1", desc="sends eviction information to processor1") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
- sequencer1.evictionCallback(address);
- }
- }
-
- action(ci_copyL2ToL1, "ci", desc="copy L2 data to L1") {
- Entry entry := getICacheEntry(address);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.Dirty := cache_entry.Dirty;
- entry.DataBlk := cache_entry.DataBlk;
- entry.FromL2 := true;
- }
-
- action(c0_copyL2ToL1, "c0", desc="copy L2 data to L1") {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.Dirty := cache_entry.Dirty;
- entry.DataBlk := cache_entry.DataBlk;
- entry.FromL2 := true;
- }
-
- action(c1_copyL2ToL1, "c1", desc="copy L2 data to L1") {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.Dirty := cache_entry.Dirty;
- entry.DataBlk := cache_entry.DataBlk;
- entry.FromL2 := true;
- }
-
- action(fi_L2ToL1, "fi", desc="L2 to L1 inst fill") {
- enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- out_msg.Dest := CacheId:L1I;
- }
- }
-
- action(f0_L2ToL1, "f0", desc="L2 to L1 data fill") {
- enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- out_msg.Dest := CacheId:L1D0;
- }
- }
-
- action(f1_L2ToL1, "f1", desc="L2 to L1 data fill") {
- enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- out_msg.Dest := CacheId:L1D1;
- }
- }
-
- action(wi_writeIcache, "wi", desc="write data to icache (and l2)") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getICacheEntry(address);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.DataBlk := in_msg.DataBlk;
- entry.Dirty := in_msg.Dirty;
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(w0_writeDcache, "w0", desc="write data to dcache 0 (and l2)") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- DPRINTF(ProtocolTrace, "CP writeD0: address %s, data: %s\n", address, in_msg.DataBlk);
- entry.DataBlk := in_msg.DataBlk;
- entry.Dirty := in_msg.Dirty;
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(w1_writeDcache, "w1", desc="write data to dcache 1 (and l2)") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.DataBlk := in_msg.DataBlk;
- entry.Dirty := in_msg.Dirty;
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
- peek(responseToCore_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:StaleNotif;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(wb_data, "wb", desc="write back data") {
- peek(responseToCore_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Ntsl := true;
- out_msg.Hit := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ph_sendProbeResponseHit, "ph", desc="send probe ack PrbShrData, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- assert(addressInCore(address) || is_valid(tbe));
- out_msg.Dirty := false; // only true if sending back data, I think
- out_msg.Hit := true;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pb_sendProbeResponseBackprobe, "pb", desc="send probe ack PrbShrData, no data, check for L1 residence") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- if (addressInCore(address)) {
- out_msg.Hit := true;
- } else {
- out_msg.Hit := false;
- }
- out_msg.Dirty := false; // not sending back data, so def. not dirty
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- assert(tbe.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(s_setSharedFlip, "s", desc="hit by shared probe, status may be different") {
- assert(is_valid(tbe));
- tbe.Shared := true;
- }
-
- action(uu_sendUnblock, "uu", desc="state changed, unblock") {
- enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(l2m_profileMiss, "l2m", desc="l2m miss profile") {
- ++L2cache.demand_misses;
- }
-
- action(l10m_profileMiss, "l10m", desc="l10m miss profile") {
- ++L1D0cache.demand_misses;
- }
-
- action(l11m_profileMiss, "l11m", desc="l11m miss profile") {
- ++L1D1cache.demand_misses;
- }
-
- action(l1im_profileMiss, "l1im", desc="l1im miss profile") {
- ++L1Icache.demand_misses;
- }
-
- action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
- probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(xx_recycleResponseQueue, "xx", desc="recycle response queue") {
- responseToCore_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
- mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- // END ACTIONS
-
- // BEGIN TRANSITIONS
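- //
- // A note on the annotation blocks: the braces after each transition's
- // (state, event[, next state]) arguments appear to list the RequestTypes
- // (tag/data array accesses) the transition is charged with, for
- // array-access stats and resource checks; an empty {} records no access.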
-
- // transitions from base
- transition(I, C0_Load_L1miss, I_E0S) {L1D0TagArrayRead, L2TagArrayRead} {
- // track misses, if implemented
- // since in I state, L2 miss as well
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- a2_allocateL2;
- i1_invCluster;
- ii_invIcache;
- n_issueRdBlk;
- p_popMandatoryQueue;
- }
-
- transition(I, C1_Load_L1miss, I_E1S) {L1D1TagArrayRead, L2TagArrayRead} {
- // track misses, if implemented
- // since in I state, L2 miss as well
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- a2_allocateL2;
- i0_invCluster;
- ii_invIcache;
- n_issueRdBlk;
- p_popMandatoryQueue;
- }
-
- transition(I, Ifetch0_L1miss, S0) {L1ITagArrayRead,L2TagArrayRead} {
- // track misses, if implemented
- // L2 miss as well
- l2m_profileMiss;
- l1im_profileMiss;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(I, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
- // track misses, if implemented
- // L2 miss as well
- l2m_profileMiss;
- l1im_profileMiss;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(I, C0_Store_L1miss, I_M0) {L1D0TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- a2_allocateL2;
- i1_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(I, C1_Store_L1miss, I_M1) {L1D1TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- a2_allocateL2;
- i0_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(S, C0_Load_L1miss, S_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(S, C1_Load_L1miss, S_F1) {L1D1TagArrayRead,L2TagArrayRead, L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(S, Ifetch0_L1miss, Si_F0) {L1ITagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l1im_profileMiss;
- ai_allocateL1I;
- fi_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(S, Ifetch1_L1miss, Si_F1) {L1ITagArrayRead,L2TagArrayRead, L2DataArrayRead} {
- l1im_profileMiss;
- ai_allocateL1I;
- fi_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition({S}, {C0_Store_L1hit, C0_Store_L1miss}, S_M0) {L1D0TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- mruD0_setD0cacheMRU;
- i1_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition({S}, {C1_Store_L1hit, C1_Store_L1miss}, S_M1) {L1D1TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- mruD1_setD1cacheMRU;
- i0_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(Es, C0_Load_L1miss, Es_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
- a0_allocateL1D;
- l10m_profileMiss;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(Es, C1_Load_L1miss, Es_F1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(Es, Ifetch0_L1miss, S0) {L1ITagArrayRead, L1ITagArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(Es, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- // THESE SHOULD NOT BE INSTANTANEOUS BUT OH WELL FOR NOW
- transition(Es, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone; // instantaneous L1/L2 dirty - no writethrough delay
- mruD0_setD0cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(Es, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- mruD1_setD1cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E0, C0_Load_L1miss, E0_F) {L1D0TagArrayRead,L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E0, C1_Load_L1miss, E0_Es) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E0, Ifetch0_L1miss, S0) {L2TagArrayRead, L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i0_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E0, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i0_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E0, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a0_allocateL1D;
- s0_storeDone;
- mruD0_setD0cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E0, C1_Store_L1miss, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
- l11m_profileMiss;
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E1, C1_Load_L1miss, E1_F) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E1, C0_Load_L1miss, E1_Es) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E1, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i1_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E1, Ifetch0_L1miss, S0) {L2TagArrayRead, L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i1_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E1, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite} {
- a1_allocateL1D;
- s1_storeDone;
- mruD1_setD1cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(E1, C0_Store_L1miss, M0) {L1D0TagArrayRead, L2TagArrayRead, L2TagArrayWrite, L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite} {
- l10m_profileMiss;
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition({O}, {C0_Store_L1hit, C0_Store_L1miss}, O_M0) {L1D0TagArrayRead,L2TagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkM
- l10m_profileMiss;
- a0_allocateL1D;
- mruD0_setD0cacheMRU;
- i1_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition({O}, {C1_Store_L1hit, C1_Store_L1miss}, O_M1) {L1D1TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkM
- l11m_profileMiss;
- a1_allocateL1D;
- mruD1_setD1cacheMRU;
- i0_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(O, C0_Load_L1miss, O_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(O, C1_Load_L1miss, O_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(Ms, C0_Load_L1miss, Ms_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(Ms, C1_Load_L1miss, Ms_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition({Ms, M0, M1, O}, Ifetch0_L1miss, MO_S0) {L1ITagArrayRead, L2DataArrayRead, L2TagArrayRead} {
- l2m_profileMiss; // permissions miss
- l1im_profileMiss;
- ai_allocateL1I;
- t_allocateTBE;
- ib_invBothClusters;
- vd_victim;
-// i2_invL2;
- p_popMandatoryQueue;
- }
-
- transition({Ms, M0, M1, O}, Ifetch1_L1miss, MO_S1) {L1ITagArrayRead, L2TagArrayRead, L2DataArrayRead } {
- l2m_profileMiss; // permissions miss
- l1im_profileMiss;
- ai_allocateL1I;
- t_allocateTBE;
- ib_invBothClusters;
- vd_victim;
-// i2_invL2;
- p_popMandatoryQueue;
- }
-
- transition(Ms, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone;
- mruD0_setD0cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(Ms, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- mruD1_setD1cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M0, C0_Load_L1miss, M0_F) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M0, C1_Load_L1miss, M0_Ms) {L2TagArrayRead, L2DataArrayRead,L1D0TagArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M0, {C0_Store_L1hit, C0_Store_L1miss}) {L1D0TagArrayRead,L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayRead} {
- a0_allocateL1D;
- s0_storeDone;
- mruD0_setD0cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M0, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2DataArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- mruD1_setD1cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M1, C0_Load_L1miss, M1_Ms) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M1, C1_Load_L1miss, M1_F) {L1D1TagArrayRead,L2TagArrayRead, L2DataArrayRead} {
- a1_allocateL1D;
- f1_L2ToL1;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M1, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone;
- mruD0_setD0cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(M1, {C1_Store_L1hit, C1_Store_L1miss}) {L1D1TagArrayRead, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite} {
- a1_allocateL1D;
- s1_storeDone;
- mruD1_setD1cacheMRU;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- // end transitions from base
-
- // Begin simple hit transitions
- transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es,
- Ms_F1, M0_Ms}, C0_Load_L1hit) {L1D0TagArrayRead, L1D0DataArrayRead} {
- // track hits, if implemented
- l0_loadDone;
- mruD0_setD0cacheMRU;
- p_popMandatoryQueue;
- }
-
- transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es,
- Ms_F0, M1_Ms}, C1_Load_L1hit) {L1D1TagArrayRead, L1D1DataArrayRead} {
- // track hits, if implemented
- l1_loadDone;
- mruD1_setD1cacheMRU;
- p_popMandatoryQueue;
- }
-
- transition({S, S_C, S_F0, S_F1, S_F}, Ifetch0_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
- // track hits, if implemented
- il0_loadDone;
- mruI_setIcacheMRU;
- p_popMandatoryQueue;
- }
-
- transition({S, S_C, S_F0, S_F1, S_F}, Ifetch1_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
- // track hits, if implemented
- il1_loadDone;
- mruI_setIcacheMRU;
- p_popMandatoryQueue;
- }
-
- // end simple hit transitions
-
- // Transitions from transient states
-
- // recycles
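- // (recycling re-enqueues the head message with recycle_latency, so the
- // event is retried once the transient state resolves)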
- transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
- E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, C0_Load_L1hit) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M1,
- O_M1, S0, S1, I_C, S0_C, S1_C, S_C}, C0_Load_L1miss) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
- IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
- E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, C1_Load_L1hit) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M0,
- O_M0, S0, S1, I_C, S0_C, S1_C, S_C}, C1_Load_L1miss) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, {Ifetch0_L1hit, Ifetch1_L1hit}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M0, I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES,
- IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, ES_I, MO_I, S_F0, S_F1, S_F,
- O_F0, O_F1, O_F, S_M0, S_M1, O_M0, O_M1, Es_F0, Es_F1, Es_F, E0_F,
- E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, I_C,
- S_C}, {Ifetch0_L1miss, Ifetch1_L1miss}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_E1S, IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F1, O_F1,
- Si_F0, Si_F1, S_M1, O_M1, S0, S1, Es_F1, E1_F, E0_Es, Ms_F1, M0_Ms,
- M1_F, I_C, S0_C, S1_C, S_C}, {C0_Store_L1miss}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_E0S, IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F0, O_F0,
- Si_F0, Si_F1, S_M0, O_M0, S0, S1, Es_F0, E0_F, E1_Es, Ms_F0, M0_F,
- M1_Ms, I_C, S0_C, S1_C, S_C}, {C1_Store_L1miss}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1, S_M0,
- O_M0, Es_F0, Es_F1, Es_F, E0_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F,
- M0_Ms, M1_Ms}, {C0_Store_L1hit}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1, S_M1,
- O_M1, Es_F0, Es_F1, Es_F, E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F,
- M0_Ms, M1_F, M1_Ms}, {C1_Store_L1hit}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
- E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, L1D0_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
- IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
- E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, L1D1_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, L1I_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({S_C, S0_C, S1_C, S0, S1, Si_F0, Si_F1, I_M0, I_M1, I_M0M1,
- I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES, S_F0, S_F1, S_F, O_F0, O_F1,
- O_F, S_M0, O_M0, S_M1, O_M1, Es_F0, Es_F1, Es_F, E0_F, E1_F, E0_Es, E1_Es,
- Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, MO_S0, MO_S1, IF_E0S,
- IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, L2_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, {NB_AckS,
- PrbInvData, PrbInv, PrbShrData}) {} {
- yy_recycleProbeQueue; // these resolve soon; handling them immediately would take more states, which does not seem necessary
- }
-
- transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES}, NB_AckE) {} {
- xx_recycleResponseQueue; // same as above: these resolve soon, and avoiding the recycle would take more states
- }
-
- transition({E0_Es, E1_F, Es_F1}, C0_Load_L1miss, Es_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S_F1, C0_Load_L1miss, S_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(O_F1, C0_Load_L1miss, O_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({Ms_F1, M0_Ms, M1_F}, C0_Load_L1miss, Ms_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(I_M0, C1_Load_L1miss, I_M0Ms) {} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(I_M1, C0_Load_L1miss, I_M1Ms) {} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(I_M0, C1_Store_L1miss, I_M0M1) {} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(I_M1, C0_Store_L1miss, I_M1M0) {} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- mru_setMRU;
- p_popMandatoryQueue;
- }
-
- transition(I_E0S, C1_Load_L1miss, I_ES) {} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition(I_E1S, C0_Load_L1miss, I_ES) {} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition({E1_Es, E0_F, Es_F0}, C1_Load_L1miss, Es_F) {L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S_F0, C1_Load_L1miss, S_F) {L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(O_F0, C1_Load_L1miss, O_F) {L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({Ms_F0, M1_Ms, M0_F}, C1_Load_L1miss, Ms_F) { L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es, Ms_F1, M0_Ms}, L1D0_Repl) {L1D0TagArrayRead} {
- i0_invCluster;
- }
-
- transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es, Ms_F0, M1_Ms}, L1D1_Repl) {L1D1TagArrayRead} {
- i1_invCluster;
- }
-
- transition({S, S_C, S_F0, S_F1}, L1I_Repl) {L1ITagArrayRead} {
- ii_invIcache;
- }
-
- transition({S, E0, E1, Es}, L2_Repl, ES_I) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead, L1D1TagArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- t_allocateTBE;
- vc_victim;
- ib_invBothClusters;
- i2_invL2;
- ii_invIcache;
- }
-
- transition({Ms, M0, M1, O}, L2_Repl, MO_I) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead, L1D1TagArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- t_allocateTBE;
- vd_victim;
- i2_invL2;
- ib_invBothClusters; // nothing will happen for D0 on M1, vice versa
- }
-
- transition(S0, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- wi_writeIcache;
- xi0_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(S1, NB_AckS, S) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- wi_writeIcache;
- xi1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(S0_C, NB_AckS, S_C) {L1D0DataArrayWrite,L2DataArrayWrite} {
- wi_writeIcache;
- xi0_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(S1_C, NB_AckS, S_C) {L1D1DataArrayWrite, L2DataArrayWrite} {
- wi_writeIcache;
- xi1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_M0, NB_AckM, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xs0_storeDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w1_writeDcache;
- xs1_storeDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- // THESE M0<->M1 transfers should not be instantaneous but oh well for now.
- transition(I_M0M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xs0_storeDone;
- uu_sendUnblock;
- i0_invCluster;
- s1_storeDone;
- pr_popResponseQueue;
- }
-
- transition(I_M1M0, NB_AckM, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w1_writeDcache;
- xs1_storeDone;
- uu_sendUnblock;
- i1_invCluster;
- s0_storeDone;
- pr_popResponseQueue;
- }
-
- // The above should be more like this, which has some latency to xfer data to the L1:
- transition(I_M0Ms, NB_AckM, M0_Ms) {L1D0DataArrayWrite,L2DataArrayWrite} {
- w0_writeDcache;
- xs0_storeDone;
- uu_sendUnblock;
- f1_L2ToL1;
- pr_popResponseQueue;
- }
-
- transition(I_M1Ms, NB_AckM, M1_Ms) {L1D1DataArrayWrite, L2DataArrayWrite} {
- w1_writeDcache;
- xs1_storeDone;
- uu_sendUnblock;
- f0_L2ToL1;
- pr_popResponseQueue;
- }
-
- transition(I_E0S, NB_AckE, E0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xl0_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_E1S, NB_AckE, E1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w1_writeDcache;
- xl1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_ES, NB_AckE, Es) {L1D1DataArrayWrite, L1D1TagArrayWrite, L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite } {
- w0_writeDcache;
- xl0_loadDone;
- w1_writeDcache;
- xl1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_E0S, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xl0_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_E1S, NB_AckS, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
- w1_writeDcache;
- xl1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_ES, NB_AckS, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
- w0_writeDcache;
- xl0_loadDone;
- w1_writeDcache;
- xl1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(S_F0, L2_to_L1D0, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(S_F1, L2_to_L1D1, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Si_F0, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- ci_copyL2ToL1;
- mru_setMRU;
- il0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Si_F1, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- ci_copyL2ToL1;
- mru_setMRU;
- il1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(S_F, L2_to_L1D0, S_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(S_F, L2_to_L1D1, S_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F0, L2_to_L1D0, O) { L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F1, L2_to_L1D1, O) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F, L2_to_L1D0, O_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F, L2_to_L1D1, O_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M1_F, L2_to_L1D1, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M0_F, L2_to_L1D0, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F0, L2_to_L1D0, Ms) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F1, L2_to_L1D1, Ms) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F, L2_to_L1D0, Ms_F1) {L1D0DataArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F, L2_to_L1D1, Ms_F0) {L1D1DataArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M1_Ms, L2_to_L1D0, Ms) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M0_Ms, L2_to_L1D1, Ms) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F0, L2_to_L1D0, Es) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F1, L2_to_L1D1, Es) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F, L2_to_L1D0, Es_F1) {L2TagArrayRead, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F, L2_to_L1D1, Es_F0) {L2TagArrayRead, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E0_F, L2_to_L1D0, E0) {L2TagArrayRead, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E1_F, L2_to_L1D1, E1) {L2TagArrayRead, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E1_Es, L2_to_L1D0, Es) {L2TagArrayRead, L2DataArrayRead} {
- c0_copyL2ToL1;
- mru_setMRU;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E0_Es, L2_to_L1D1, Es) {L2TagArrayRead, L2DataArrayRead} {
- c1_copyL2ToL1;
- mru_setMRU;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(IF_E0S, L2_to_L1D0, I_E0S) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF_E1S, L2_to_L1D1, I_E1S) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF_ES, L2_to_L1D0, IF1_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF_ES, L2_to_L1D1, IF0_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF0_ES, L2_to_L1D0, I_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF1_ES, L2_to_L1D1, I_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(F_S0, L2_to_L1I, S0) {} {
- pt_popTriggerQueue;
- }
-
- transition(F_S1, L2_to_L1I, S1) {} {
- pt_popTriggerQueue;
- }
-
- transition({S_M0, O_M0}, NB_AckM, M0) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- mru_setMRU;
- xs0_storeDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition({S_M1, O_M1}, NB_AckM, M1) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- mru_setMRU;
- xs1_storeDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(MO_I, NB_AckWB, I) {L2TagArrayWrite} {
- wb_data;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(ES_I, NB_AckWB, I) {L2TagArrayWrite} {
- wb_data;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(MO_S0, NB_AckWB, S0) {L2TagArrayWrite} {
- wb_data;
- i2_invL2;
- a2_allocateL2;
- d_deallocateTBE;
- nS_issueRdBlkS;
- pr_popResponseQueue;
- }
-
- transition(MO_S1, NB_AckWB, S1) {L2TagArrayWrite} {
- wb_data;
- i2_invL2;
- a2_allocateL2;
- d_deallocateTBE;
- nS_issueRdBlkS;
- pr_popResponseQueue;
- }
-
- // Writeback cancel "ack"
- transition(I_C, NB_AckWB, I) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(S0_C, NB_AckWB, S0) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- pr_popResponseQueue;
- }
-
- transition(S1_C, NB_AckWB, S1) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- pr_popResponseQueue;
- }
-
- transition(S_C, NB_AckWB, S) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- pr_popResponseQueue;
- }
-
- // Begin Probe Transitions
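- //
- // Probe responses report back to the directory via three flags: Dirty
- // (a data message follows), Hit (the block is present here), and Ntsl;
- // the pi/pim/ph/pb/pd/pdm/pdt actions above set these per case.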
-
- transition({Ms, M0, M1, O}, PrbInvData, I) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- i2_invL2;
- ib_invBothClusters;
- pp_popProbeQueue;
- }
-
- transition({Es, E0, E1, S, I}, PrbInvData, I) {L2TagArrayRead, L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- ib_invBothClusters;
- ii_invIcache; // only relevant for S
- pp_popProbeQueue;
- }
-
- transition(S_C, PrbInvData, I_C) {L2TagArrayWrite} {
- t_allocateTBE;
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInvData, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- pp_popProbeQueue;
- }
-
- transition({Ms, M0, M1, O, Es, E0, E1, S, I}, PrbInv, I) {L2TagArrayRead, L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2; // nothing will happen in I
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(S_C, PrbInv, I_C) {L2TagArrayWrite} {
- t_allocateTBE;
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition({Ms, M0, M1, O}, PrbShrData, O) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({Es, E0, E1, S}, PrbShrData, S) {L2TagArrayRead, L2TagArrayWrite} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(S_C, PrbShrData) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({I, I_C}, PrbShrData) {L2TagArrayRead} {
- pb_sendProbeResponseBackprobe;
- pp_popProbeQueue;
- }
-
- transition({I_M0, I_E0S}, {PrbInv, PrbInvData}) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters; // must invalidate current data (only relevant for I_M0)
- a0_allocateL1D; // but make sure there is room for incoming data when it arrives
- pp_popProbeQueue;
- }
-
- transition({I_M1, I_E1S}, {PrbInv, PrbInvData}) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters; // must invalidate current data (only relevant for I_M1)
- a1_allocateL1D; // but make sure there is room for incoming data when it arrives
- pp_popProbeQueue;
- }
-
- transition({I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_ES}, {PrbInv, PrbInvData, PrbShrData}) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- a0_allocateL1D;
- a1_allocateL1D;
- pp_popProbeQueue;
- }
-
- transition({I_M0, I_E0S, I_M1, I_E1S}, PrbShrData) {} {
- pb_sendProbeResponseBackprobe;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbInvData, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInvData, I_C) {} {
- pdt_sendProbeResponseDataFromTBE;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbShrData, ES_I) {} {
- ph_sendProbeResponseHit;
- s_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbShrData, MO_I) {} {
- pdt_sendProbeResponseDataFromTBE;
- s_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition(MO_S0, PrbInvData, S0_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdt_sendProbeResponseDataFromTBE;
- i2_invL2;
- a2_allocateL2;
- d_deallocateTBE;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition(MO_S1, PrbInvData, S1_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdt_sendProbeResponseDataFromTBE;
- i2_invL2;
- a2_allocateL2;
- d_deallocateTBE;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition(MO_S0, PrbInv, S0_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- a2_allocateL2;
- d_deallocateTBE;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition(MO_S1, PrbInv, S1_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- a2_allocateL2;
- d_deallocateTBE;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition({MO_S0, MO_S1}, PrbShrData) {} {
- pdt_sendProbeResponseDataFromTBE;
- s_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition({S_F0, Es_F0, E0_F, E1_Es}, {PrbInvData, PrbInv}, IF_E0S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- // invalidate everything you've got
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- // but make sure you have room for what you need from the fill
- a0_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({S_F1, Es_F1, E1_F, E0_Es}, {PrbInvData, PrbInv}, IF_E1S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- // invalidate everything you've got
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- // but make sure you have room for what you need from the fill
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({S_F, Es_F}, {PrbInvData, PrbInv}, IF_ES) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- // invalidate everything you've got
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- // but make sure you have room for what you need from the fill
- a0_allocateL1D;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition(Si_F0, {PrbInvData, PrbInv}, F_S0) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition(Si_F1, {PrbInvData, PrbInv}, F_S1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition({Es_F0, E0_F, E1_Es}, PrbShrData, S_F0) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({Es_F1, E1_F, E0_Es}, PrbShrData, S_F1) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(Es_F, PrbShrData, S_F) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({S_F0, S_F1, S_F, Si_F0, Si_F1}, PrbShrData) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(S_M0, PrbInvData, I_M0) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition(O_M0, PrbInvData, I_M0) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdm_sendProbeResponseDataMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S_M0, O_M0}, {PrbInv}, I_M0) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition(S_M1, PrbInvData, I_M1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition(O_M1, PrbInvData, I_M1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdm_sendProbeResponseDataMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S_M1, O_M1}, {PrbInv}, I_M1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S0, S0_C}, {PrbInvData, PrbInv}) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S1, S1_C}, {PrbInvData, PrbInv}) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S_M0, S_M1}, PrbShrData) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({O_M0, O_M1}, PrbShrData) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({S0, S1, S0_C, S1_C}, PrbShrData) {} {
- pb_sendProbeResponseBackprobe;
- pp_popProbeQueue;
- }
-
- transition({Ms_F0, M0_F, M1_Ms, O_F0}, PrbInvData, IF_E0S) { L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F1, M1_F, M0_Ms, O_F1}, PrbInvData, IF_E1S) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- ib_invBothClusters;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F, O_F}, PrbInvData, IF_ES) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F0, M0_F, M1_Ms, O_F0}, PrbInv, IF_E0S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F1, M1_F, M0_Ms, O_F1}, PrbInv, IF_E1S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F, O_F}, PrbInv, IF_ES) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F0, M0_F, M1_Ms}, PrbShrData, O_F0) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({Ms_F1, M1_F, M0_Ms}, PrbShrData, O_F1) {L2DataArrayRead} {
- pd_sendProbeResponseData; // mirrors the cluster-0 case above
- pp_popProbeQueue;
- }
-
- transition({Ms_F}, PrbShrData, O_F) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({O_F0, O_F1, O_F}, PrbShrData) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- // END TRANSITIONS
-}
-
-
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:L3Cache, "L3")
- : CacheMemory * L3cache;
- WireBuffer * reqToDir;
- WireBuffer * respToDir;
- WireBuffer * l3UnblockToDir;
- WireBuffer * reqToL3;
- WireBuffer * probeToL3;
- WireBuffer * respToL3;
- Cycles l3_request_latency := 1;
- Cycles l3_response_latency := 35;
-
- // To the general response network
- MessageBuffer * responseFromL3, network="To", virtual_network="2", ordered="false", vnet_type="response";
-
- // From the general response network
- MessageBuffer * responseToL3, network="From", virtual_network="2", ordered="false", vnet_type="response";
-
-{
- // EVENTS
- enumeration(Event, desc="L3 Events") {
- // Requests coming from the Cores
- RdBlk, desc="CPU RdBlk event";
- RdBlkM, desc="CPU RdBlkM event";
- RdBlkS, desc="CPU RdBlkS event";
- CtoD, desc="Change to Dirty request";
- WrVicBlk, desc="L2 Victim (dirty)";
- WrVicBlkShared, desc="L2 Victim (dirty, shared)";
- ClVicBlk, desc="L2 Victim (clean)";
- ClVicBlkShared, desc="L2 Victim (clean, shared)";
-
- CPUData, desc="WB data from CPU";
- CPUDataShared, desc="WB data from CPU, NBReqShared 1";
- StaleWB, desc="WB stale; no data";
-
- L3_Repl, desc="L3 Replacement";
-
- // Probes
- PrbInvData, desc="Invalidating probe, return dirty data";
- PrbInv, desc="Invalidating probe, no need to return data";
- PrbShrData, desc="Downgrading probe, return data";
-
- // Coming from Memory Controller
- WBAck, desc="ack from memory";
-
- CancelWB, desc="Cancel WB from L2";
- }
-
- // STATES
- // Base States:
- state_declaration(State, desc="L3 State", default="L3Cache_State_I") {
- M, AccessPermission:Read_Write, desc="Modified"; // No other cache has copy, memory stale
- O, AccessPermission:Read_Only, desc="Owned"; // Correct most recent copy, others may exist in S
- E, AccessPermission:Read_Write, desc="Exclusive"; // Correct, most recent, and only copy (and == Memory)
- S, AccessPermission:Read_Only, desc="Shared"; // Correct, most recent. If no one in O, then == Memory
- I, AccessPermission:Invalid, desc="Invalid";
-
- I_M, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
- I_O, AccessPermission:Busy, desc="Invalid, received WrVicBlkShared, sent Ack, waiting for Data";
- I_E, AccessPermission:Busy, desc="Invalid, received ClVicBlk, sent Ack, waiting for Data";
- I_S, AccessPermission:Busy, desc="Invalid, received ClVicBlkShared, sent Ack, waiting for Data";
- S_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
- S_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
- S_E, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to E";
- S_S, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to S";
- E_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
- E_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
- E_E, AccessPermission:Busy, desc="received ClVicBlk, sent Ack, waiting for Data, then go to E";
- E_S, AccessPermission:Busy, desc="received ClVicBlkShared, sent Ack, waiting for Data, then go to S";
- O_M, AccessPermission:Busy, desc="Owned, received WrVicBlk, sent Ack, waiting for Data";
- O_O, AccessPermission:Busy, desc="Owned, received WrVicBlkShared, sent Ack, waiting for Data";
- O_E, AccessPermission:Busy, desc="Owned, received ClVicBlk, sent Ack, waiting for Data";
- O_S, AccessPermission:Busy, desc="Owned, received ClVicBlkShared, sent Ack, waiting for Data";
- M_M, AccessPermission:Busy, desc="Modified, received WrVicBlk, sent Ack, waiting for Data";
- M_O, AccessPermission:Busy, desc="Modified, received WrVicBlkShared, sent Ack, waiting for Data";
- M_E, AccessPermission:Busy, desc="Modified, received ClVicBlk, sent Ack, waiting for Data";
- M_S, AccessPermission:Busy, desc="Modified, received ClVicBlkShared, sent Ack, waiting for Data";
- D_I, AccessPermission:Invalid, desc="drop WB data on the floor when received";
- MOD_I, AccessPermission:Busy, desc="drop WB data on the floor, waiting for WBAck from Mem";
- MO_I, AccessPermission:Busy, desc="M or O, received L3_Repl, waiting for WBAck from Mem";
- I_I, AccessPermission:Busy, desc="I_MO received L3_Repl";
- I_CD, AccessPermission:Busy, desc="I_I received WBAck, now just waiting for CPUData";
- I_C, AccessPermission:Invalid, desc="sent cancel, just waiting to receive mem wb ack so nothing gets confused";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the tag array";
- TagArrayWrite, desc="Write the tag array";
- }
-
- // STRUCTURES
-
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different from memory)?";
- DataBlock DataBlk, desc="Data for the block";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, desc="Is the data dirty?";
- bool Shared, desc="Victim hit by shared probe";
- MachineID From, desc="Waiting for writeback from...";
- }
-
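- // One Transaction Buffer Entry (TBE) per in-flight address keeps the data
- // and state of a pending victimization or writeback.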
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<L3Cache_TBE>", constructor="m_number_of_TBEs";
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // FUNCTION DEFINITIONS
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L3cache.lookup(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- return getCacheEntry(addr).DataBlk;
- }
-
- bool presentOrAvail(Addr addr) {
- return L3cache.isTagPresent(addr) || L3cache.cacheAvail(addr);
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
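- // Functional accesses check the TBE first so data buffered for an in-flight
- // writeback stays visible; otherwise they fall through to memory.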
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes +
- functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return L3Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return L3Cache_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L3Cache_State_to_permission(state));
- }
- }
-
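- // Stat recording and resource checks are stubs here: transitions still
- // carry tag/data array annotations, but this L3 model does not act on them.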
- void recordRequestType(RequestType request_type, Addr addr) {
-
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- return true;
- }
-
-
- // OUT PORTS
- out_port(requestNetwork_out, CPURequestMsg, reqToDir);
- out_port(L3Resp_out, ResponseMsg, respToDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromL3);
- out_port(unblockNetwork_out, UnblockMsg, l3UnblockToDir);
-
- // IN PORTS
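- // respToL3 only ever carries writeback acks from the memory-side controller.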
- in_port(NBResponse_in, ResponseMsg, respToL3) {
- if (NBResponse_in.isReady(clockEdge())) {
- peek(NBResponse_in, ResponseMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Error on NBResponse Type");
- }
- }
- }
- }
-
- // Response Network
- in_port(responseNetwork_in, ResponseMsg, responseToL3) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:CPUData) {
- if (in_msg.NbReqShared) {
- trigger(Event:CPUDataShared, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:CPUData, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
- trigger(Event:StaleWB, in_msg.addr, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg);
- error("Unexpected Response Type");
- }
- }
- }
- }
-
- // probe network
- in_port(probeNetwork_in, NBProbeRequestMsg, probeToL3) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- if (in_msg.ReturnData) {
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- } else {
- error("Don't think I should get any of these");
- }
- }
- }
- }
- }
-
- // Request Network
- in_port(requestNetwork_in, CPURequestMsg, reqToL3) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (presentOrAvail(in_msg.addr)) {
- if (in_msg.Shared) {
- trigger(Event:ClVicBlkShared, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:ClVicBlk, in_msg.addr, cache_entry, tbe);
- }
- } else {
- Addr victim := L3cache.cacheProbe(in_msg.addr);
- trigger(Event:L3_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
- if (presentOrAvail(in_msg.addr)) {
- if (in_msg.Shared) {
- trigger(Event:WrVicBlkShared, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
- }
- } else {
- Addr victim := L3cache.cacheProbe(in_msg.addr);
- trigger(Event:L3_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else if (in_msg.Type == CoherenceRequestType:WrCancel) {
- if (is_valid(tbe) && tbe.From == in_msg.Requestor) {
- trigger(Event:CancelWB, in_msg.addr, cache_entry, tbe);
- } else {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
- }
- }
- }
- }
-
- // BEGIN ACTIONS
-
- action(i_invL3, "i", desc="invalidate L3 cache block") {
- if (is_valid(cache_entry)) {
- L3cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(rm_sendResponseM, "rm", desc="send Modified response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.State := CoherenceState:Modified;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(rs_sendResponseS, "rs", desc="send Shared response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.State := CoherenceState:Shared;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
-
- action(r_requestToMem, "r", desc="Miss in L3, pass on") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Shared := false; // unneeded for this request
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- if (is_valid(cache_entry)) {
- tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
- tbe.Dirty := cache_entry.Dirty;
- }
- tbe.From := machineID;
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(vd_vicDirty, "vd", desc="Victimize dirty L3 data") {
- enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:VicDirty;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
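- // Probe responses come in four flavors: inv (Ntsl set, no data), hit
- // (clean, no data), miss (no data), and data (the dirty block is returned
- // to the directory).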
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ph_sendProbeResponseHit, "ph", desc="send probe ack, no data") {
- enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := true;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pm_sendProbeResponseMiss, "pm", desc="send probe ack, no data") {
- enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
- enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- assert(tbe.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.State := CoherenceState:NA;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(mc_cancelMemWriteback, "mc", desc="send writeback cancel to memory") {
- enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WrCancel;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
- action(a_allocateBlock, "a", desc="allocate L3 block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L3cache.allocate(address, new Entry));
- }
- }
-
- action(d_writeData, "d", desc="write data to L3") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- cache_entry.Dirty := in_msg.Dirty;
- }
- cache_entry.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Writing to L3: %s\n", in_msg);
- }
- }
-
- action(rd_copyDataFromRequest, "rd", desc="copy WB data from request into L3") {
- peek(requestNetwork_in, CPURequestMsg) {
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := true;
- }
- }
-
- action(f_setFrom, "f", desc="set who WB is expected to come from") {
- peek(requestNetwork_in, CPURequestMsg) {
- tbe.From := in_msg.Requestor;
- }
- }
-
- action(rf_resetFrom, "rf", desc="reset From") {
- tbe.From := machineID;
- }
-
- action(wb_data, "wb", desc="write back data") {
- enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(wt_writeDataToTBE, "wt", desc="write WB data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- }
- }
-
- action(uu_sendUnblock, "uu", desc="state changed, unblock") {
- enqueue(unblockNetwork_out, UnblockMsg, l3_request_latency) {
- out_msg.addr := address;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
- L3cache.setMRU(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(pn_popNBResponseQueue, "pn", desc="pop NB response queue") {
- NBResponse_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(zz_recycleRequestQueue, "\z", desc="recycle request queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
-
- // END ACTIONS
-
- // BEGIN TRANSITIONS
-
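- // transition(<start states>, <event>[, <final state>]) { <array accesses> }
- // names the actions to run; the brace list records which tag/data arrays
- // are touched, feeding recordRequestType and checkResourceAvailable.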
- // transitions from base
-
- transition({I, I_C}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {TagArrayRead} {
- r_requestToMem;
- p_popRequestQueue;
- }
-
- transition(O, RdBlk) {TagArrayRead, DataArrayRead} {
- rs_sendResponseS;
- ut_updateTag;
- p_popRequestQueue;
- }
- transition(M, RdBlk, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- p_popRequestQueue;
- }
-
- transition(S, RdBlk) {TagArrayRead, DataArrayRead} {
- rs_sendResponseS;
- ut_updateTag;
- p_popRequestQueue;
- }
- transition(E, RdBlk, S) {TagArrayRead, DataArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- p_popRequestQueue;
- }
-
- transition({M, O}, RdBlkS, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- p_popRequestQueue;
- }
-
- transition({E, S}, RdBlkS, S) {TagArrayRead, DataArrayRead, TagArrayWrite} {
- rs_sendResponseS;
- ut_updateTag;
- p_popRequestQueue;
- }
-
- transition(M, RdBlkM, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- rm_sendResponseM;
- i_invL3;
- p_popRequestQueue;
- }
-
- transition({O, S}, {RdBlkM, CtoD}) {TagArrayRead} {
- r_requestToMem; // can't handle this, just forward
- p_popRequestQueue;
- }
-
- transition(E, RdBlkM, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- rm_sendResponseM;
- i_invL3;
- p_popRequestQueue;
- }
-
- transition({I}, WrVicBlk, I_M) {TagArrayRead, TagArrayWrite} {
- a_allocateBlock;
- t_allocateTBE;
- f_setFrom;
-// rd_copyDataFromRequest;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(I_C, {WrVicBlk, WrVicBlkShared, ClVicBlk, ClVicBlkShared}) {} {
- zz_recycleRequestQueue;
- }
-
- transition({I}, WrVicBlkShared, I_O) {TagArrayRead, TagArrayWrite} {
- a_allocateBlock;
- t_allocateTBE;
- f_setFrom;
-// rd_copyDataFromRequest;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(S, WrVicBlkShared, S_O) {TagArrayRead, TagArrayWrite} {
-// rd_copyDataFromRequest;
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(S, WrVicBlk, S_M) {TagArrayRead, TagArrayWrite} { // technically should not be possible, but assume the data comes back with shared bit flipped
-// rd_copyDataFromRequest;
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(E, WrVicBlk, E_M) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(E, WrVicBlkShared, E_O) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(O, WrVicBlk, O_M) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(O, WrVicBlkShared, O_O) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(M, WrVicBlk, M_M) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(M, WrVicBlkShared, M_O) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition({I}, ClVicBlk, I_E) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- a_allocateBlock;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition({I}, ClVicBlkShared, I_S) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- a_allocateBlock;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(S, ClVicBlk, S_E) {TagArrayRead, TagArrayWrite} { // technically impossible, assume data comes back with shared bit flipped
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(S, ClVicBlkShared, S_S) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(E, ClVicBlk, E_E) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(E, ClVicBlkShared, E_S) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(O, ClVicBlk, O_E) {TagArrayRead, TagArrayWrite} { // technically impossible, but assume data comes back with shared bit flipped
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(O, ClVicBlkShared, O_S) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(M, ClVicBlk, M_E) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(M, ClVicBlkShared, M_S) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition({MO_I}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {} {
- r_requestToMem;
- p_popRequestQueue;
- }
-
- transition(MO_I, {WrVicBlkShared, WrVicBlk, ClVicBlk, ClVicBlkShared}, MOD_I) {TagArrayWrite} {
- f_setFrom;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(I_M, CPUData, M) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_M, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_O, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_E, CPUData, E) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_E, CPUDataShared, S) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(I_S, {CPUData, CPUDataShared}, S) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- pr_popResponseQueue;
- }
-
- transition(S_M, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(S_O, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(S_E, CPUDataShared, S) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(S_S, {CPUData, CPUDataShared}, S) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(O_E, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition(O_S, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- d_writeData;
- ut_updateTag; // update tag on writeback hits.
- pr_popResponseQueue;
- }
-
- transition({D_I}, {CPUData, CPUDataShared}, I) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(MOD_I, {CPUData, CPUDataShared}, MO_I) {TagArrayWrite} {
- uu_sendUnblock;
- rf_resetFrom;
- pr_popResponseQueue;
- }
-
- transition(I_I, {CPUData, CPUDataShared}, MO_I) {TagArrayWrite, DataArrayRead} {
- uu_sendUnblock;
- wt_writeDataToTBE;
- rf_resetFrom;
- pr_popResponseQueue;
- }
-
- transition(I_CD, {CPUData, CPUDataShared}, I) {DataArrayRead, TagArrayWrite} {
- uu_sendUnblock;
- wt_writeDataToTBE;
- wb_data;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition({M, O}, L3_Repl, MO_I) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- vd_vicDirty;
- i_invL3;
- }
-
- transition({E, S}, L3_Repl, I) {TagArrayRead, TagArrayWrite} {
- i_invL3;
- }
-
- transition({I_M, I_O, S_M, S_O, E_M, E_O}, L3_Repl) {} {
- zz_recycleRequestQueue;
- }
-
- transition({O_M, O_O, O_E, O_S, M_M, M_O, M_E, M_S}, L3_Repl) {} {
- zz_recycleRequestQueue;
- }
-
- transition({I_E, I_S, S_E, S_S, E_E, E_S}, L3_Repl) {} {
- zz_recycleRequestQueue;
- }
-
- transition({M, O}, PrbInvData, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
- pd_sendProbeResponseData;
- i_invL3;
- pp_popProbeQueue;
- }
-
- transition({E, S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- i_invL3; // nothing will happen in I
- pp_popProbeQueue;
- }
-
- transition({M, O, E, S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
- pi_sendProbeResponseInv;
- i_invL3; // nothing will happen in I
- pp_popProbeQueue;
- }
-
- transition({M, O}, PrbShrData, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({E, S}, PrbShrData, S) {TagArrayRead, TagArrayWrite} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(I, PrbShrData) {TagArrayRead} {
- pm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInvData, I_C) {TagArrayWrite, DataArrayRead} {
- pdt_sendProbeResponseDataFromTBE;
- mc_cancelMemWriteback;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInv, I_C) {TagArrayWrite} {
- pi_sendProbeResponseInv;
- mc_cancelMemWriteback;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbShrData) {DataArrayRead} {
- pdt_sendProbeResponseDataFromTBE;
- pp_popProbeQueue;
- }
-
- transition(I_C, {PrbInvData, PrbInv}) {} {
- pi_sendProbeResponseInv;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbShrData) {} {
- pm_sendProbeResponseMiss;
- pp_popProbeQueue;
- }
-
- transition(I_I, {WBAck}, I_CD) {TagArrayWrite} {
- pn_popNBResponseQueue;
- }
-
- transition(MOD_I, WBAck, D_I) {DataArrayRead} {
- wb_data;
- pn_popNBResponseQueue;
- }
-
- transition(MO_I, WBAck, I) {DataArrayRead, TagArrayWrite} {
- wb_data;
- dt_deallocateTBE;
- pn_popNBResponseQueue;
- }
-
- transition(I_C, {WBAck}, I) {TagArrayWrite} {
- dt_deallocateTBE;
- pn_popNBResponseQueue;
- }
-
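- // A WrCancel from the expected writer aborts the pending writeback: unblock
- // the directory and fall back toward the pre-victimization state.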
- transition({I_M, I_O, I_E, I_S}, CancelWB, I) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- i_invL3;
- p_popRequestQueue;
- }
-
- transition({S_S, S_O, S_M, S_E}, CancelWB, S) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- p_popRequestQueue;
- }
-
- transition({E_M, E_O, E_E, E_S}, CancelWB, E) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- p_popRequestQueue;
- }
-
- transition({O_M, O_O, O_E, O_S}, CancelWB, O) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- p_popRequestQueue;
- }
-
- transition({M_M, M_O, M_E, M_S}, CancelWB, M) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- p_popRequestQueue;
- }
-
- transition(D_I, CancelWB, I) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- p_popRequestQueue;
- }
-
- transition(MOD_I, CancelWB, MO_I) {TagArrayWrite} {
- uu_sendUnblock;
- rf_resetFrom;
- p_popRequestQueue;
- }
-
- transition(I_I, CancelWB, I_C) {TagArrayWrite} {
- uu_sendUnblock;
- rf_resetFrom;
- mc_cancelMemWriteback;
- p_popRequestQueue;
- }
-
- transition(I_CD, CancelWB, I) {TagArrayWrite} {
- uu_sendUnblock;
- dt_deallocateTBE;
- mc_cancelMemWriteback;
- p_popRequestQueue;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:CorePair, "CP-like Core Coherence")
- : Sequencer * sequencer;
- Sequencer * sequencer1;
- CacheMemory * L1Icache;
- CacheMemory * L1D0cache;
- CacheMemory * L1D1cache;
- CacheMemory * L2cache;
- int regionBufferNum;
- bool send_evictions := "False";
- Cycles issue_latency := 5;
- Cycles l2_hit_latency := 18;
-
- // BEGIN Core Buffers
-
- // To the Network
- MessageBuffer * requestFromCore, network="To", virtual_network="0", ordered="true", vnet_type="request";
- MessageBuffer * responseFromCore, network="To", virtual_network="2", ordered="false", vnet_type="response";
- MessageBuffer * unblockFromCore, network="To", virtual_network="4", ordered="false", vnet_type="unblock";
-
- // From the Network
- MessageBuffer * probeToCore, network="From", virtual_network="0", ordered="false", vnet_type="request";
- MessageBuffer * responseToCore, network="From", virtual_network="2", ordered="false", vnet_type="response";
-
- MessageBuffer * mandatoryQueue, ordered="false";
- MessageBuffer * triggerQueue, ordered="true";
-
- // END Core Buffers
-
-{
- // BEGIN STATES
- state_declaration(State, desc="Cache states", default="CorePair_State_I") {
-
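- // Suffixes 0/1 name the owning cluster; an "s" suffix (Es, Ms) means both
- // clusters of the pair may hold the line.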
- I, AccessPermission:Invalid, desc="Invalid";
- S, AccessPermission:Read_Only, desc="Shared";
- E0, AccessPermission:Read_Write, desc="Exclusive with Cluster 0 ownership";
- E1, AccessPermission:Read_Write, desc="Exclusive with Cluster 1 ownership";
- Es, AccessPermission:Read_Write, desc="Exclusive in core, both clusters may be sharing line";
- O, AccessPermission:Read_Only, desc="Owner state in core, both clusters and other cores may be sharing line";
- Ms, AccessPermission:Read_Write, desc="Modified in core, both clusters may be sharing line";
- M0, AccessPermission:Read_Write, desc="Modified with cluster 0 ownership";
- M1, AccessPermission:Read_Write, desc="Modified with cluster 1 ownership";
-
- // Transient States
- I_M0, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
- I_M1, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
- I_M0M1, AccessPermission:Busy, desc="Was in I_M0, got a store request from other cluster as well";
- I_M1M0, AccessPermission:Busy, desc="Was in I_M1, got a store request from other cluster as well";
- I_M0Ms, AccessPermission:Busy, desc="Was in I_M0, got a load request from other cluster as well";
- I_M1Ms, AccessPermission:Busy, desc="Was in I_M1, got a load request from other cluster as well";
- I_E0S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
- I_E1S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
- I_ES, AccessPermission:Busy, desc="S_F got hit by invalidating probe, RdBlk response needs to go to both clusters";
-
- IF_E0S, AccessPermission:Busy, desc="something got hit with Probe Invalidate, now just I_E0S but expecting a L2_to_L1D0 trigger, just drop when receive";
- IF_E1S, AccessPermission:Busy, desc="something got hit with Probe Invalidate, now just I_E1S but expecting a L2_to_L1D1 trigger, just drop when receive";
- IF_ES, AccessPermission:Busy, desc="same, but waiting for two fills";
- IF0_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
- IF1_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
- F_S0, AccessPermission:Busy, desc="same, but going to S0 when trigger received";
- F_S1, AccessPermission:Busy, desc="same, but going to S1 when trigger received";
-
- ES_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for clean writeback ack";
- MO_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for dirty writeback ack";
- MO_S0, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
- MO_S1, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
- S_F0, AccessPermission:Read_Only, desc="Shared, filling L1";
- S_F1, AccessPermission:Read_Only, desc="Shared, filling L1";
- S_F, AccessPermission:Read_Only, desc="Shared, filling L1";
- O_F0, AccessPermission:Read_Only, desc="Owned, filling L1";
- O_F1, AccessPermission:Read_Only, desc="Owned, filling L1";
- O_F, AccessPermission:Read_Only, desc="Owned, filling L1";
- Si_F0, AccessPermission:Read_Only, desc="Shared, filling icache";
- Si_F1, AccessPermission:Read_Only, desc="Shared, filling icache";
- S_M0, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- S_M1, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
- O_M0, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
- O_M1, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
- S0, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 0, waiting for response";
- S1, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 1, waiting for response";
-
- Es_F0, AccessPermission:Read_Write, desc="Es, Cluster read, filling";
- Es_F1, AccessPermission:Read_Write, desc="Es, Cluster read, filling";
- Es_F, AccessPermission:Read_Write, desc="Es, other cluster read, filling";
- E0_F, AccessPermission:Read_Write, desc="E0, cluster read, filling";
- E1_F, AccessPermission:Read_Write, desc="E1, cluster read, filling";
- E0_Es, AccessPermission:Read_Write, desc="E0, other cluster read, filling, to Es";
- E1_Es, AccessPermission:Read_Write, desc="E1, other cluster read, filling, to Es";
- Ms_F0, AccessPermission:Read_Write, desc="Ms, cluster 0 read, filling";
- Ms_F1, AccessPermission:Read_Write, desc="Ms, cluster 1 read, filling";
- Ms_F, AccessPermission:Read_Write, desc="Ms, other cluster read, filling";
- M0_F, AccessPermission:Read_Write, desc="M0, cluster 0 read, filling";
- M0_Ms, AccessPermission:Read_Write, desc="M0, other cluster read, filling, to Ms";
- M1_F, AccessPermission:Read_Write, desc="M1, cluster 1 read, filling";
- M1_Ms, AccessPermission:Read_Write, desc="M1, other cluster read, filling, to Ms";
-
- I_C, AccessPermission:Invalid, desc="Invalid, but waiting for WBAck from NB from canceled writeback";
- S0_C, AccessPermission:Busy, desc="MO_S0 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
- S1_C, AccessPermission:Busy, desc="MO_S1 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
- S_C, AccessPermission:Busy, desc="S*_C got NB_AckS, still waiting for WBAck";
-
- } // END STATES
-
- // BEGIN EVENTS
- enumeration(Event, desc="CP Events") {
- // CP Initiated events
- C0_Load_L1miss, desc="Cluster 0 load, L1 missed";
- C0_Load_L1hit, desc="Cluster 0 load, L1 hit";
- C1_Load_L1miss, desc="Cluster 1 load, L1 missed";
- C1_Load_L1hit, desc="Cluster 1 load, L1 hit";
- Ifetch0_L1hit, desc="Cluster 0 instruction fetch, hit in the L1";
- Ifetch1_L1hit, desc="Cluster 1 instruction fetch, hit in the L1";
- Ifetch0_L1miss, desc="Cluster 0 instruction fetch, missed in the L1";
- Ifetch1_L1miss, desc="Cluster 1 instruction fetch, missed in the L1";
- C0_Store_L1miss, desc="Cluster 0 store missed in L1";
- C0_Store_L1hit, desc="Cluster 0 store hit in L1";
- C1_Store_L1miss, desc="Cluster 1 store missed in L1";
- C1_Store_L1hit, desc="Cluster 1 store hit in L1";
- // NB Initiated events
- NB_AckS, desc="NB Ack (Shared) to Core Request";
- NB_AckM, desc="NB Ack (Modified) to Core Request";
- NB_AckE, desc="NB Ack (Exclusive) to Core Request";
-
- NB_AckWB, desc="NB Ack for writeback";
-
- // Memory System initiatied events
- L1I_Repl, desc="Replace address from L1I"; // Presumed clean
- L1D0_Repl, desc="Replace address from L1D0"; // Presumed clean
- L1D1_Repl, desc="Replace address from L1D1"; // Presumed clean
- L2_Repl, desc="Replace address from L2";
-
- L2_to_L1D0, desc="L1 fill from L2";
- L2_to_L1D1, desc="L1 fill from L2";
- L2_to_L1I, desc="L1 fill from L2";
-
- // Probe Events
- PrbInvData, desc="probe, return O or M data";
- PrbInvDataDemand, desc="probe, return O or M data. Demand request";
- PrbInv, desc="probe, no need for data";
- PrbShrData, desc="probe downgrade, return O or M data";
- PrbShrDataDemand, desc="probe downgrade, return O or M data. Demand request";
- ForceRepl, desc="probe from r-buf. Act as though a repl";
- ForceDowngrade, desc="probe from r-buf. Act as though a downgrade";
-
- } // END EVENTS
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- L1D0DataArrayRead, desc="Read the data array";
- L1D0DataArrayWrite, desc="Write the data array";
- L1D0TagArrayRead, desc="Read the tag array";
- L1D0TagArrayWrite, desc="Write the tag array";
- L1D1DataArrayRead, desc="Read the data array";
- L1D1DataArrayWrite, desc="Write the data array";
- L1D1TagArrayRead, desc="Read the tag array";
- L1D1TagArrayWrite, desc="Write the tag array";
- L1IDataArrayRead, desc="Read the data array";
- L1IDataArrayWrite, desc="Write the data array";
- L1ITagArrayRead, desc="Read the tag array";
- L1ITagArrayWrite, desc="Write the tag array";
- L2DataArrayRead, desc="Read the data array";
- L2DataArrayWrite, desc="Write the data array";
- L2TagArrayRead, desc="Read the tag array";
- L2TagArrayWrite, desc="Write the tag array";
- }
-
-
- // BEGIN STRUCTURE DEFINITIONS
-
-
- // Cache Entry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different from memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different from memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Shared, desc="Victim hit by shared probe";
- bool AckNeeded, desc="True if need to ack r-dir";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<CorePair_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- // END STRUCTURE DEFINITIONS
-
- // BEGIN INTERNAL FUNCTIONS
-
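- // Core requests are addressed to the region buffer (regionBufferNum) rather
- // than directly to the directory; getPeer ignores its argument.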
- MachineID getPeer(MachineID mach) {
- return createMachineID(MachineType:RegionBuffer, intToID(regionBufferNum));
- }
-
- bool addressInCore(Addr addr) {
- return (L2cache.isTagPresent(addr) || L1Icache.isTagPresent(addr) || L1D0cache.isTagPresent(addr) || L1D1cache.isTagPresent(addr));
- }
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
- return L2cache_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return tbe.DataBlk;
- } else {
- return getCacheEntry(addr).DataBlk;
- }
- }
-
- Entry getL1CacheEntry(Addr addr, int cluster), return_by_pointer="yes" {
- if (cluster == 0) {
- Entry L1D0_entry := static_cast(Entry, "pointer", L1D0cache.lookup(addr));
- return L1D0_entry;
- } else {
- Entry L1D1_entry := static_cast(Entry, "pointer", L1D1cache.lookup(addr));
- return L1D1_entry;
- }
- }
-
- Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry c_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
- return c_entry;
- }
-
- bool presentOrAvail2(Addr addr) {
- return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
- }
-
- bool presentOrAvailI(Addr addr) {
- return L1Icache.isTagPresent(addr) || L1Icache.cacheAvail(addr);
- }
-
- bool presentOrAvailD0(Addr addr) {
- return L1D0cache.isTagPresent(addr) || L1D0cache.cacheAvail(addr);
- }
-
- bool presentOrAvailD1(Addr addr) {
- return L1D1cache.isTagPresent(addr) || L1D1cache.cacheAvail(addr);
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- return CorePair_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return CorePair_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- bool isValid(Addr addr) {
- AccessPermission perm := getAccessPermission(addr);
- if (perm == AccessPermission:NotPresent ||
- perm == AccessPermission:Invalid ||
- perm == AccessPermission:Busy) {
- return false;
- } else {
- return true;
- }
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(CorePair_State_to_permission(state));
- }
- }
-
- MachineType testAndClearLocalHit(Entry cache_entry) {
- assert(is_valid(cache_entry));
- if (cache_entry.FromL2) {
- cache_entry.FromL2 := false;
- return MachineType:L2Cache;
- } else {
- return MachineType:L1Cache;
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L1D0DataArrayRead) {
- L1D0cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L1D0DataArrayWrite) {
- L1D0cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L1D0TagArrayRead) {
- L1D0cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L1D0TagArrayWrite) {
- L1D0cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:L1D1DataArrayRead) {
- L1D1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L1D1DataArrayWrite) {
- L1D1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L1D1TagArrayRead) {
- L1D1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L1D1TagArrayWrite) {
- L1D1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:L1IDataArrayRead) {
- L1Icache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L1IDataArrayWrite) {
- L1Icache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L1ITagArrayRead) {
- L1Icache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L1ITagArrayWrite) {
- L1Icache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:L2DataArrayRead) {
- L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L2DataArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L2TagArrayRead) {
- L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L2TagArrayWrite) {
- L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L2DataArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L2DataArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L2TagArrayRead) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L2TagArrayWrite) {
- return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D0DataArrayRead) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D0DataArrayWrite) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D0TagArrayRead) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D0TagArrayWrite) {
- return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D1DataArrayRead) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D1DataArrayWrite) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1D1TagArrayRead) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1D1TagArrayWrite) {
- return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1IDataArrayRead) {
- return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1IDataArrayWrite) {
- return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L1ITagArrayRead) {
- return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L1ITagArrayWrite) {
- return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- return true;
- }
- }
-
- // END INTERNAL FUNCTIONS
-
- // ** OUT_PORTS **
-
- out_port(requestNetwork_out, CPURequestMsg, requestFromCore);
- out_port(responseNetwork_out, ResponseMsg, responseFromCore);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
-
- // ** IN_PORTS **
-
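- // The trigger queue carries internal L2-to-L1 fill notifications.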
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, block_on="addr") {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == TriggerType:L2_to_L1) {
- if (in_msg.Dest == CacheId:L1I) {
- trigger(Event:L2_to_L1I, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Dest == CacheId:L1D0) {
- trigger(Event:L2_to_L1D0, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Dest == CacheId:L1D1) {
- trigger(Event:L2_to_L1D1, in_msg.addr, cache_entry, tbe);
- } else {
- error("unexpected trigger dest");
- }
- }
- }
- }
- }
-
-
- in_port(probeNetwork_in, NBProbeRequestMsg, probeToCore) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg, block_on="addr") {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- if (in_msg.DemandRequest) {
- trigger(Event:PrbInvDataDemand, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.ReturnData) {
- trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- if (in_msg.DemandRequest) {
- trigger(Event:PrbShrDataDemand, in_msg.addr, cache_entry, tbe);
- } else {
- assert(in_msg.ReturnData);
- trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == ProbeRequestType:PrbRepl) {
- trigger(Event:ForceRepl, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == ProbeRequestType:PrbRegDowngrade) {
- trigger(Event:ForceDowngrade, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unknown probe request");
- }
- }
- }
- }
-
-
- // ResponseNetwork
- in_port(responseToCore_in, ResponseMsg, responseToCore) {
- if (responseToCore_in.isReady(clockEdge())) {
- peek(responseToCore_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
-
- if (in_msg.Type == CoherenceResponseType:NBSysResp) {
- if (in_msg.State == CoherenceState:Modified) {
- trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.State == CoherenceState:Shared) {
- trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.State == CoherenceState:Exclusive) {
- trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
- trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected Response Message to Core");
- }
- }
- }
- }
-
- // Nothing from the Unblock Network
-
- // Mandatory Queue
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
- Entry cache_entry := getCacheEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
-
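- // Even context IDs belong to cluster 0, odd context IDs to cluster 1.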
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // FETCH ACCESS
-
- if (L1Icache.isTagPresent(in_msg.LineAddress)) {
- if (mod(in_msg.contextId, 2) == 0) {
- trigger(Event:Ifetch0_L1hit, in_msg.LineAddress, cache_entry, tbe);
- } else {
- trigger(Event:Ifetch1_L1hit, in_msg.LineAddress, cache_entry, tbe);
- }
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- if (presentOrAvailI(in_msg.LineAddress)) {
- if (mod(in_msg.contextId, 2) == 0) {
- trigger(Event:Ifetch0_L1miss, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- trigger(Event:Ifetch1_L1miss, in_msg.LineAddress, cache_entry,
- tbe);
- }
- } else {
- Addr victim := L1Icache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L1I_Repl, victim,
- getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else { // Not present or avail in L2
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L2_Repl(0) is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- } else {
- // DATA ACCESS
- if (mod(in_msg.contextId, 2) == 1) {
- if (L1D1cache.isTagPresent(in_msg.LineAddress)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C1_Load_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- // Stores must write through, make sure L2 avail.
- if (presentOrAvail2(in_msg.LineAddress)) {
- trigger(Event:C1_Store_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L2_Repl(1) is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- if (presentOrAvailD1(in_msg.LineAddress)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C1_Load_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- } else {
- trigger(Event:C1_Store_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- }
- } else {
- Addr victim := L1D1cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L1D1_Repl is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L1D1_Repl, victim,
- getCacheEntry(victim), TBEs.lookup(victim));
- }
- } else { // not present or avail in L2
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L2_Repl(2) is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
- }
- }
- } else {
- Entry L1D0cache_entry := getL1CacheEntry(in_msg.LineAddress, 0);
- if (is_valid(L1D0cache_entry)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C0_Load_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- trigger(Event:C0_Store_L1hit, in_msg.LineAddress, cache_entry,
- tbe);
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L2_Repl(3) is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- } else {
- if (presentOrAvail2(in_msg.LineAddress)) {
- if (presentOrAvailD0(in_msg.LineAddress)) {
- if (in_msg.Type == RubyRequestType:LD) {
- trigger(Event:C0_Load_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- } else {
- trigger(Event:C0_Store_L1miss, in_msg.LineAddress,
- cache_entry, tbe);
- }
- } else {
- Addr victim := L1D0cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L1D0_Repl is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L1D0_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- } else {
- Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
- DPRINTF(RubySlicc, "Victim for %s L2_Repl(4) is %s\n", in_msg.LineAddress, victim);
- trigger(Event:L2_Repl, victim, getCacheEntry(victim),
- TBEs.lookup(victim));
- }
- }
- }
- }
- }
- }
- }
-
-
- // ACTIONS
- action(ii_invIcache, "ii", desc="invalidate iCache") {
- if (L1Icache.isTagPresent(address)) {
- L1Icache.deallocate(address);
- }
- }
-
- action(i0_invCluster, "i0", desc="invalidate cluster 0") {
- if (L1D0cache.isTagPresent(address)) {
- L1D0cache.deallocate(address);
- }
- }
-
- action(i1_invCluster, "i1", desc="invalidate cluster 1") {
- if (L1D1cache.isTagPresent(address)) {
- L1D1cache.deallocate(address);
- }
- }
-
- action(ib_invBothClusters, "ib", desc="invalidate both clusters") {
- if (L1D0cache.isTagPresent(address)) {
- L1D0cache.deallocate(address);
- }
- if (L1D1cache.isTagPresent(address)) {
- L1D1cache.deallocate(address);
- }
- }
-
- action(i2_invL2, "i2", desc="invalidate L2") {
- if(is_valid(cache_entry)) {
- L2cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
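-  // The n*_issueRdBlk* actions below send CPURequestMsgs to the peer
-  // (apparently the region buffer, the "r-buf" of later comments) after
-  // issue_latency cycles; the *Sinked variants mark a CtoD request the
-  // directory has already sunk.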
- action(n_issueRdBlk, "n", desc="Issue RdBlk") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlk;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkM;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(nMs_issueRdBlkMSinked, "nMs", desc="Issue RdBlkM with CtoDSinked") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkM;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.CtoDSinked := true;
- }
- }
-
- action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- action(nSs_issueRdBlkSSinked, "nSs", desc="Issue RdBlkS with CtoDSinked") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:RdBlkS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.CtoDSinked := true;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
-
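-  // Victimization: vd_victim writes dirty (M/O) data back as VicDirty,
-  // while vc_victim drops clean (E/S) data as VicClean with no payload.
-  // The Shared flag tells the receiver whether other copies may exist,
-  // and the *Force variants additionally mark the request Private.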
- action(vd_victim, "vd", desc="Victimize M/O L2 Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- assert(is_valid(cache_entry));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicDirty;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:O) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- }
- }
-
- action(vc_victim, "vc", desc="Victimize E/S L2 Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicClean;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:S) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- }
- }
-
- // Could send these two directly to dir if we made a new out network on channel 0
- action(vdf_victimForce, "vdf", desc="Victimize M/O L2 Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- assert(is_valid(cache_entry));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicDirty;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:O) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- out_msg.Private := true;
- }
- }
-
- action(vcf_victimForce, "vcf", desc="Victimize E/S L2 Data") {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Type := CoherenceRequestType:VicClean;
- out_msg.InitialRequestTime := curCycle();
- if (cache_entry.CacheState == State:S) {
- out_msg.Shared := true;
- } else {
- out_msg.Shared := false;
- }
- out_msg.Private := true;
- }
- }
-
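-  // Allocation reserves a tag without installing data; the data arrives
-  // later through the w*_write* or c*_copyL2ToL1 actions.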
- action(a0_allocateL1D, "a0", desc="Allocate L1D0 Block") {
- if (L1D0cache.isTagPresent(address) == false) {
- L1D0cache.allocateVoid(address, new Entry);
- }
- }
-
- action(a1_allocateL1D, "a1", desc="Allocate L1D1 Block") {
- if (L1D1cache.isTagPresent(address) == false) {
- L1D1cache.allocateVoid(address, new Entry);
- }
- }
-
- action(ai_allocateL1I, "ai", desc="Allocate L1I Block") {
- if (L1Icache.isTagPresent(address) == false) {
- L1Icache.allocateVoid(address, new Entry);
- }
- }
-
- action(a2_allocateL2, "a2", desc="Allocate L2 Block") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
- tbe.Dirty := cache_entry.Dirty;
- tbe.Shared := false;
- }
-
- action(d_deallocateTBE, "d", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
- responseToCore_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="Pop Trigger Queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="pop probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
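-
-  // A message stays at the head of its in_port until popped, so every
-  // transition must dequeue exactly once; the p*_pop* actions above do
-  // that for their respective queues.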
-
- action(il0_loadDone, "il0", desc="Cluster 0 i load done") {
- Entry entry := getICacheEntry(address);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(il1_loadDone, "il1", desc="Cluster 1 i load done") {
- Entry entry := getICacheEntry(address);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(l0_loadDone, "l0", desc="Cluster 0 load done") {
- Entry entry := getL1CacheEntry(address, 0);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(l1_loadDone, "l1", desc="Cluster 1 load done") {
- Entry entry := getL1CacheEntry(address, 1);
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- assert(is_valid(entry));
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- }
-
- action(xl0_loadDone, "xl0", desc="Cluster 0 load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- DPRINTF(ProtocolTrace, "CP Load Done 0 -- address %s, data: %s\n",
- address, l2entry.DataBlk);
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- assert(is_valid(l2entry));
- sequencer.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(xl1_loadDone, "xl1", desc="Cluster 1 load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- assert(is_valid(l2entry));
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(xi0_loadDone, "xi0", desc="Cluster 0 i-load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- assert(is_valid(l2entry));
- sequencer.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(xi1_loadDone, "xi1", desc="Cluster 1 i-load done") {
- peek(responseToCore_in, ResponseMsg) {
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- Entry l2entry := getCacheEntry(address); // Used for functional accesses
- // L2 supplies data (functional accesses only look in L2, ok because L1
- // writes through to L2)
- assert(is_valid(l2entry));
- sequencer1.readCallback(address,
- l2entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- }
- }
-
- action(s0_storeDone, "s0", desc="Cluster 0 store done") {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- sequencer.writeCallback(address,
- cache_entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- cache_entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- entry.Dirty := true;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
-
- action(s1_storeDone, "s1", desc="Cluster 1 store done") {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- sequencer1.writeCallback(address,
- cache_entry.DataBlk,
- true,
- testAndClearLocalHit(entry));
- cache_entry.Dirty := true;
- entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
-
- action(xs0_storeDone, "xs0", desc="Cluster 0 store done") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- sequencer.writeCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- cache_entry.Dirty := true;
- entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
- }
-
- action(xs1_storeDone, "xs1", desc="Cluster 1 store done") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
- (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
- sequencer1.writeCallback(address,
- cache_entry.DataBlk,
- false,
- machineIDToMachineType(in_msg.Sender),
- in_msg.InitialRequestTime,
- in_msg.ForwardRequestTime,
- in_msg.ProbeRequestStartTime);
- cache_entry.Dirty := true;
- entry.Dirty := true;
- entry.DataBlk := cache_entry.DataBlk;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
- }
-
- action(forward_eviction_to_cpu0, "fec0", desc="sends eviction information to processor0") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(forward_eviction_to_cpu1, "fec1", desc="sends eviction information to processor1") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
- sequencer1.evictionCallback(address);
- }
- }
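-
-  // send_evictions is presumably set when the core model must observe
-  // lines leaving the cache (e.g. so an out-of-order CPU can squash
-  // speculative loads); the two callbacks above notify the owning
-  // sequencer.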
-
- action(ci_copyL2ToL1, "ci", desc="copy L2 data to L1") {
- Entry entry := getICacheEntry(address);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.Dirty := cache_entry.Dirty;
- entry.DataBlk := cache_entry.DataBlk;
- entry.FromL2 := true;
- }
-
- action(c0_copyL2ToL1, "c0", desc="copy L2 data to L1") {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.Dirty := cache_entry.Dirty;
- entry.DataBlk := cache_entry.DataBlk;
- entry.FromL2 := true;
- }
-
- action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
- peek(responseToCore_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:StaleNotif;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(c1_copyL2ToL1, "c1", desc="copy L2 data to L1") {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.Dirty := cache_entry.Dirty;
- entry.DataBlk := cache_entry.DataBlk;
- entry.FromL2 := true;
- }
-
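-  // L2-to-L1 fills are modeled by self-enqueuing a TriggerMsg after
-  // l2_hit_latency; the L2_to_L1* transitions later consume it and copy
-  // the data via the c*_copyL2ToL1 actions.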
- action(fi_L2ToL1, "fi", desc="L2 to L1 inst fill") {
- enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- out_msg.Dest := CacheId:L1I;
- }
- }
-
- action(f0_L2ToL1, "f0", desc="L2 to L1 data fill") {
- enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- out_msg.Dest := CacheId:L1D0;
- }
- }
-
- action(f1_L2ToL1, "f1", desc="L2 to L1 data fill") {
- enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- out_msg.Dest := CacheId:L1D1;
- }
- }
-
- action(wi_writeIcache, "wi", desc="write data to icache (and l2)") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getICacheEntry(address);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.DataBlk := in_msg.DataBlk;
- entry.Dirty := in_msg.Dirty;
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(w0_writeDcache, "w0", desc="write data to dcache 0 (and l2)") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 0);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.DataBlk := in_msg.DataBlk;
- entry.Dirty := in_msg.Dirty;
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(w1_writeDcache, "w1", desc="write data to dcache 1 (and l2)") {
- peek(responseToCore_in, ResponseMsg) {
- Entry entry := getL1CacheEntry(address, 1);
- assert(is_valid(entry));
- assert(is_valid(cache_entry));
- entry.DataBlk := in_msg.DataBlk;
- entry.Dirty := in_msg.Dirty;
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(wb_data, "wb", desc="write back data") {
- peek(responseToCore_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUData;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Shared) {
- out_msg.NbReqShared := true;
- } else {
- out_msg.NbReqShared := false;
- }
- out_msg.State := CoherenceState:Shared; // faux info
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Hit := false;
- out_msg.Ntsl := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false;
- out_msg.Ntsl := true;
- out_msg.Hit := false;
- APPEND_TRANSITION_COMMENT("Setting Ms");
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(ph_sendProbeResponseHit, "ph", desc="send probe ack PrbShrData, no data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- assert(addressInCore(address) || is_valid(tbe));
-      out_msg.Dirty := false; // only true if sending back data, I think
- out_msg.Hit := true;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pb_sendProbeResponseBackprobe, "pb", desc="send probe ack PrbShrData, no data, check for L1 residence") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- if (addressInCore(address)) {
- out_msg.Hit := true;
- } else {
- out_msg.Hit := false;
- }
- out_msg.Dirty := false; // not sending back data, so def. not dirty
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := cache_entry.DataBlk;
- assert(cache_entry.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- APPEND_TRANSITION_COMMENT("Setting Ms");
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.isValid := isValid(address);
- }
- }
-
- action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- assert(tbe.Dirty);
- out_msg.Dirty := true;
- out_msg.Hit := true;
- out_msg.State := CoherenceState:NA;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.isValid := isValid(address);
- }
- }
-
-  action(ra_sendReplAck, "ra", desc="Send ack to r-buf, if needed, that the line was replaced") {
- if (is_invalid(tbe) || tbe.AckNeeded) {
- enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:InvAck;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- APPEND_TRANSITION_COMMENT(" Sending ack to r-buf ");
- } else {
- APPEND_TRANSITION_COMMENT(" NOT Sending ack to r-buf ");
- }
- }
-
- action(m_markAckNeeded, "m", desc="Mark TBE to send ack when deallocated") {
- assert(is_valid(tbe));
- tbe.AckNeeded := true;
- }
-
- action(mc_cancelWB, "mc", desc="send writeback cancel to L3") {
- enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:CPUCancelWB;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(s_setSharedFlip, "s", desc="hit by shared probe, status may be different") {
- assert(is_valid(tbe));
- tbe.Shared := true;
- }
-
- action(uu_sendUnblock, "uu", desc="state changed, unblock") {
- enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- out_msg.wasValid := isValid(address);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
-  action(sdv_sendDoneValid, "sdv", desc="Request finished, send done ack; line stays valid") {
- enqueue(unblockNetwork_out, UnblockMsg, 1) {
- out_msg.addr := address;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.DoneAck := true;
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else if (is_valid(cache_entry)) {
- out_msg.Dirty := cache_entry.Dirty;
- } else {
- out_msg.Dirty := false;
- }
- out_msg.validToInvalid := false;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
-  action(sdi_sendDoneInvalid, "sdi", desc="Request finished, send done ack; line now invalid") {
- enqueue(unblockNetwork_out, UnblockMsg, 1) {
- out_msg.addr := address;
- out_msg.Destination.add(getPeer(machineID));
- out_msg.DoneAck := true;
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- if (is_valid(tbe)) {
- out_msg.Dirty := tbe.Dirty;
- } else if (is_valid(cache_entry)) {
- out_msg.Dirty := cache_entry.Dirty;
- } else {
- out_msg.Dirty := false;
- }
- out_msg.validToInvalid := true;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(l10m_profileMiss, "l10m", desc="l10m miss profile") {
- ++L1D0cache.demand_misses;
- }
-
- action(l11m_profileMiss, "l11m", desc="l11m miss profile") {
- ++L1D1cache.demand_misses;
- }
-
-  action(l1im_profileMiss, "l1im", desc="l1im miss profile") {
- ++L1Icache.demand_misses;
- }
-
- action(l2m_profileMiss, "l2m", desc="l2m miss profile") {
- ++L2cache.demand_misses;
- }
-
- action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
- probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
- mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
- // END ACTIONS
-
- // BEGIN TRANSITIONS
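-  // Each transition reads as: transition(state(s), event(s)[, next state])
-  // {cache array resources touched} {actions executed, in order}.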
-
- // transitions from base
- transition(I, C0_Load_L1miss, I_E0S) {L1D0TagArrayRead, L2TagArrayRead} {
- // track misses, if implemented
- // since in I state, L2 miss as well
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- l1im_profileMiss;
- a2_allocateL2;
- i1_invCluster;
- ii_invIcache;
- n_issueRdBlk;
- p_popMandatoryQueue;
- }
-
- transition(I, C1_Load_L1miss, I_E1S) {L1D1TagArrayRead, L2TagArrayRead} {
- // track misses, if implemented
- // since in I state, L2 miss as well
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- a2_allocateL2;
- i0_invCluster;
- ii_invIcache;
- n_issueRdBlk;
- p_popMandatoryQueue;
- }
-
- transition(I, Ifetch0_L1miss, S0) {L1ITagArrayRead, L2TagArrayRead} {
- // track misses, if implemented
- // L2 miss as well
- l10m_profileMiss;
- l2m_profileMiss;
- l1im_profileMiss;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(I, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
- l11m_profileMiss;
- // track misses, if implemented
- // L2 miss as well
- l2m_profileMiss;
- l1im_profileMiss;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(I, C0_Store_L1miss, I_M0) {L1D0TagArrayRead,L2TagArrayRead} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- a2_allocateL2;
- i1_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
-  transition(I, C1_Store_L1miss, I_M1) {L1D1TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- a2_allocateL2;
- i0_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(S, C0_Load_L1miss, S_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S, C1_Load_L1miss, S_F1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S, Ifetch0_L1miss, Si_F0) {L1ITagArrayRead,L2TagArrayRead, L2DataArrayRead} {
- l1im_profileMiss;
- ai_allocateL1I;
- fi_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S, Ifetch1_L1miss, Si_F1) {L1ITagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l1im_profileMiss;
- ai_allocateL1I;
- fi_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({S}, {C0_Store_L1hit, C0_Store_L1miss}, S_M0) {L1D0TagArrayRead, L2TagArrayRead}{
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- i1_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition({S}, {C1_Store_L1hit, C1_Store_L1miss}, S_M1) {L1D1TagArrayRead,L2TagArrayRead} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- i0_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
- transition(Es, C0_Load_L1miss, Es_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(Es, C1_Load_L1miss, Es_F1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(Es, Ifetch0_L1miss, S0) {L1ITagArrayRead, L2TagArrayRead} {
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(Es, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- ib_invBothClusters;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
-  // THESE SHOULD NOT BE INSTANTANEOUS, BUT OH WELL FOR NOW
- transition(Es, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayWrite,L1D0TagArrayRead, L2TagArrayRead, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone; // instantaneous L1/L2 dirty - no writethrough delay
- p_popMandatoryQueue;
- }
-
- transition(Es, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(E0, C0_Load_L1miss, E0_F) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(E0, C1_Load_L1miss, E0_Es) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(E0, Ifetch0_L1miss, S0) {L2TagArrayRead, L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i0_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E0, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead } {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i0_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E0, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a0_allocateL1D;
- s0_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(E0, C1_Store_L1miss, M1) {L1D0TagArrayRead, L1D0TagArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
- a1_allocateL1D;
- l11m_profileMiss;
- i0_invCluster;
- s1_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(E1, C1_Load_L1miss, E1_F) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- a1_allocateL1D;
- l11m_profileMiss;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(E1, C0_Load_L1miss, E1_Es) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- a0_allocateL1D;
- l10m_profileMiss;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(E1, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i1_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E1, Ifetch0_L1miss, S0) {L2TagArrayRead,L1ITagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue RdBlkS
- l1im_profileMiss;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- i1_invCluster;
- nS_issueRdBlkS;
- p_popMandatoryQueue;
- }
-
- transition(E1, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a1_allocateL1D;
- s1_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(E1, C0_Store_L1miss, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
- l10m_profileMiss;
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone;
- p_popMandatoryQueue;
- }
-
- transition({O}, {C0_Store_L1hit, C0_Store_L1miss}, O_M0) {L1D0TagArrayRead, L2TagArrayRead} {
- l2m_profileMiss; // permissions miss, still issue CtoD
- l10m_profileMiss;
- a0_allocateL1D;
- i1_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition({O}, {C1_Store_L1hit, C1_Store_L1miss}, O_M1) {L1D1TagArrayRead, L2TagArrayRead} {
-    l2m_profileMiss; // permissions miss, still issue CtoD
- l11m_profileMiss;
- a1_allocateL1D;
- i0_invCluster;
- ii_invIcache;
- nM_issueRdBlkM;
- p_popMandatoryQueue;
- }
-
- transition(O, C0_Load_L1miss, O_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(O, C1_Load_L1miss, O_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(Ms, C0_Load_L1miss, Ms_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(Ms, C1_Load_L1miss, Ms_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({Ms, M0, M1, O}, Ifetch0_L1miss, MO_S0) {L1ITagArrayRead, L2TagArrayRead} {
- l2m_profileMiss; // permissions miss
- l1im_profileMiss;
- ai_allocateL1I;
- t_allocateTBE;
- ib_invBothClusters;
- vd_victim;
-// i2_invL2;
- p_popMandatoryQueue;
- }
-
-  transition({Ms, M0, M1, O}, Ifetch1_L1miss, MO_S1) {L1ITagArrayRead, L2TagArrayRead} {
- l2m_profileMiss; // permissions miss
-    l1im_profileMiss;
- ai_allocateL1I;
- t_allocateTBE;
- ib_invBothClusters;
- vd_victim;
-// i2_invL2;
- p_popMandatoryQueue;
- }
-
- transition(Ms, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(Ms, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(M0, C0_Load_L1miss, M0_F) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(M0, C1_Load_L1miss, M0_Ms) {L2TagArrayRead, L2DataArrayRead,L1D1TagArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(M0, {C0_Store_L1hit, C0_Store_L1miss}) {L1D0TagArrayRead, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayRead} {
- a0_allocateL1D;
- s0_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(M0, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
- a1_allocateL1D;
- i0_invCluster;
- s1_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(M1, C0_Load_L1miss, M1_Ms) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
-  transition(M1, C1_Load_L1miss, M1_F) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(M1, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
- a0_allocateL1D;
- i1_invCluster;
- s0_storeDone;
- p_popMandatoryQueue;
- }
-
- transition(M1, {C1_Store_L1hit, C1_Store_L1miss}) {L1D1TagArrayRead, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite} {
- a1_allocateL1D;
- s1_storeDone;
- p_popMandatoryQueue;
- }
-
- // end transitions from base
-
- // Begin simple hit transitions
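-  // Hits complete locally through the *_loadDone callbacks; no network
-  // traffic is generated and the state does not change.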
- transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es,
- Ms_F1, M0_Ms}, C0_Load_L1hit) {L1D0TagArrayRead, L1D0DataArrayRead} {
- // track hits, if implemented
- l0_loadDone;
- p_popMandatoryQueue;
- }
-
- transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es,
- Ms_F0, M1_Ms}, C1_Load_L1hit) {L1D1TagArrayRead, L1D1DataArrayRead} {
- // track hits, if implemented
- l1_loadDone;
- p_popMandatoryQueue;
- }
-
- transition({S, S_C, S_F0, S_F1, S_F}, Ifetch0_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
- // track hits, if implemented
- il0_loadDone;
- p_popMandatoryQueue;
- }
-
-  transition({S, S_C, S_F0, S_F1, S_F}, Ifetch1_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
- // track hits, if implemented
- il1_loadDone;
- p_popMandatoryQueue;
- }
-
- // end simple hit transitions
-
- // Transitions from transient states
-
- // recycles
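-  // Rather than adding a transient state for every race, requests that
-  // cannot be handled yet are recycled: re-enqueued on their queue after
-  // recycle_latency and retried.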
- transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
- E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, C0_Load_L1hit) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M1,
- O_M1, S0, S1, I_C, S0_C, S1_C, S_C}, C0_Load_L1miss) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
- IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
- E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, C1_Load_L1hit) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M0,
- O_M0, S0, S1, I_C, S0_C, S1_C, S_C}, C1_Load_L1miss) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, {Ifetch0_L1hit, Ifetch1_L1hit}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M0, I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES,
- IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, ES_I, MO_I, S_F0, S_F1, S_F,
- O_F0, O_F1, O_F, S_M0, S_M1, O_M0, O_M1, Es_F0, Es_F1, Es_F, E0_F,
- E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, I_C,
- S_C}, {Ifetch0_L1miss, Ifetch1_L1miss}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_E1S, IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F1, O_F1,
- Si_F0, Si_F1, S_M1, O_M1, S0, S1, Es_F1, E1_F, E0_Es, Ms_F1, M0_Ms,
- M1_F, I_C, S0_C, S1_C, S_C}, {C0_Store_L1miss}) {} {
- zz_recycleMandatoryQueue;
- }
-
-  transition({I_E0S, IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F0, O_F0,
- Si_F0, Si_F1, S_M0, O_M0, S0, S1, Es_F0, E0_F, E1_Es, Ms_F0, M0_F,
- M1_Ms, I_C, S0_C, S1_C, S_C}, {C1_Store_L1miss}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
-    IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1, S_M0,
-    O_M0, Es_F0, Es_F1, Es_F, E0_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F,
-    M0_Ms, M1_Ms}, {C0_Store_L1hit}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1, S_M1,
- O_M1, Es_F0, Es_F1, Es_F, E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F,
- M0_Ms, M1_F, M1_Ms}, {C1_Store_L1hit}) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
- IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
- E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, L1D0_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
- IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
- E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, L1D1_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, L1I_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
-  transition({S_C, S0_C, S1_C, S0, S1, Si_F0, Si_F1, I_M0, I_M1, I_M0M1,
-    I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES, S_F0, S_F1, S_F, O_F0, O_F1,
-    O_F, S_M0, O_M0, S_M1, O_M1, Es_F0, Es_F1, Es_F, E0_F, E1_F, E0_Es,
-    E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, MO_S0, MO_S1,
-    IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, L2_Repl) {} {
- zz_recycleMandatoryQueue;
- }
-
- transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, {NB_AckS,
- PrbInvData, PrbInvDataDemand, PrbInv, PrbShrData, PrbShrDataDemand}) {} {
-    zz_recycleMandatoryQueue; // These could be handled now without extra
-    // transient states (probes included), but they resolve soon anyway, so
-    // recycling is simpler.
- }
-
- transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES}, NB_AckE) {} {
-    zz_recycleMandatoryQueue; // Same as above: recycle rather than add states.
- }
-
- transition({E0_Es, E1_F, Es_F1}, C0_Load_L1miss, Es_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S_F1, C0_Load_L1miss, S_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(O_F1, C0_Load_L1miss, O_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({Ms_F1, M0_Ms, M1_F}, C0_Load_L1miss, Ms_F) {L2DataArrayRead} {
- l10m_profileMiss;
- a0_allocateL1D;
- f0_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(I_M0, C1_Load_L1miss, I_M0Ms){
- l11m_profileMiss;
- l2m_profileMiss;
- a1_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition(I_M1, C0_Load_L1miss, I_M1Ms){
- l10m_profileMiss;
- l2m_profileMiss;
- a0_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition(I_M0, C1_Store_L1miss, I_M0M1) {
- l11m_profileMiss;
- l2m_profileMiss;
- a1_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition(I_M1, C0_Store_L1miss, I_M1M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
- l2m_profileMiss;
- a0_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition(I_E0S, C1_Load_L1miss, I_ES) {} {
- l2m_profileMiss;
- l11m_profileMiss;
- a1_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition(I_E1S, C0_Load_L1miss, I_ES) {} {
- l2m_profileMiss;
- l10m_profileMiss;
- a0_allocateL1D;
- p_popMandatoryQueue;
- }
-
- transition({E1_Es, E0_F, Es_F0}, C1_Load_L1miss, Es_F) {L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(S_F0, C1_Load_L1miss, S_F) { L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition(O_F0, C1_Load_L1miss, O_F) {L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({Ms_F0, M1_Ms, M0_F}, C1_Load_L1miss, Ms_F) {L2DataArrayRead} {
- l11m_profileMiss;
- a1_allocateL1D;
- f1_L2ToL1;
- p_popMandatoryQueue;
- }
-
- transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es, Ms_F1, M0_Ms}, L1D0_Repl) {L1D0TagArrayRead} {
- i0_invCluster;
- }
-
- transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es, Ms_F0, M1_Ms}, L1D1_Repl) {L1D1TagArrayRead} {
- i1_invCluster;
- }
-
- transition({S, S_C, S_F0, S_F1}, L1I_Repl) {L1ITagArrayRead} {
- ii_invIcache;
- }
-
- transition({S, E0, E1, Es}, L2_Repl, ES_I) {L2TagArrayRead,L1D0TagArrayRead, L1D1TagArrayRead, L1ITagArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- t_allocateTBE;
- vc_victim;
- ib_invBothClusters;
- i2_invL2;
- ii_invIcache;
- }
-
- transition({Ms, M0, M1, O}, L2_Repl, MO_I) {L2TagArrayRead, L2TagArrayWrite, L1D0TagArrayRead, L1D1TagArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- t_allocateTBE;
- vd_victim;
- i2_invL2;
- ib_invBothClusters; // nothing will happen for D0 on M1, vice versa
- }
-
- transition(S0, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- wi_writeIcache;
- xi0_loadDone;
- uu_sendUnblock;
- sdv_sendDoneValid;
- pr_popResponseQueue;
- }
-
- transition(S1, NB_AckS, S) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- wi_writeIcache;
- xi1_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
-  transition(S0_C, NB_AckS, S_C) {L1IDataArrayWrite, L2DataArrayWrite} {
-    // no done ack needed since the RdBlkS was "sinked"
- wi_writeIcache;
- xi0_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
-  transition(S1_C, NB_AckS, S_C) {L1IDataArrayWrite, L2DataArrayWrite} {
- wi_writeIcache;
- xi1_loadDone;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_M0, NB_AckM, M0) { L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xs0_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
- w1_writeDcache;
- xs1_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
-  // These M0->M1 transfers should not be instantaneous, but oh well for now.
- transition(I_M0M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xs0_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- i0_invCluster;
- s1_storeDone;
- pr_popResponseQueue;
- }
-
- transition(I_M1M0, NB_AckM, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
- w1_writeDcache;
- xs1_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- i1_invCluster;
- s0_storeDone;
- pr_popResponseQueue;
- }
-
-  // The above should be more like this, which has some latency to transfer to L1
- transition(I_M0Ms, NB_AckM, M0_Ms) {L1D0DataArrayWrite,L2DataArrayWrite} {
- w0_writeDcache;
- xs0_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- f1_L2ToL1;
- pr_popResponseQueue;
- }
-
- transition(I_M1Ms, NB_AckM, M1_Ms) {L1D1DataArrayWrite,L2DataArrayWrite} {
- w1_writeDcache;
- xs1_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- f0_L2ToL1;
- pr_popResponseQueue;
- }
-
- transition(I_E0S, NB_AckE, E0) {L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xl0_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_E1S, NB_AckE, E1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w1_writeDcache;
- xl1_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_ES, NB_AckE, Es) {L1D1DataArrayWrite, L1D1TagArrayWrite, L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite } {
- w0_writeDcache;
- xl0_loadDone;
- w1_writeDcache;
- xl1_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_E0S, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- w0_writeDcache;
- xl0_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_E1S, NB_AckS, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
- w1_writeDcache;
- xl1_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(I_ES, NB_AckS, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
- w0_writeDcache;
- xl0_loadDone;
- w1_writeDcache;
- xl1_loadDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(S_F0, L2_to_L1D0, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(S_F1, L2_to_L1D1, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Si_F0, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- ci_copyL2ToL1;
- il0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Si_F1, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- ci_copyL2ToL1;
- il1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(S_F, L2_to_L1D0, S_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(S_F, L2_to_L1D1, S_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F0, L2_to_L1D0, O) { L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F1, L2_to_L1D1, O) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F, L2_to_L1D0, O_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(O_F, L2_to_L1D1, O_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M1_F, L2_to_L1D1, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M0_F, L2_to_L1D0, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F0, L2_to_L1D0, Ms) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F1, L2_to_L1D1, Ms) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Ms_F, L2_to_L1D0, Ms_F1) {L1D0DataArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
-  transition(Ms_F, L2_to_L1D1, Ms_F0) {L1D1DataArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M1_Ms, L2_to_L1D0, Ms) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(M0_Ms, L2_to_L1D1, Ms) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F0, L2_to_L1D0, Es) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F1, L2_to_L1D1, Es) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F, L2_to_L1D0, Es_F1) {L2TagArrayRead, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(Es_F, L2_to_L1D1, Es_F0) {L2TagArrayRead, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E0_F, L2_to_L1D0, E0) {L2TagArrayRead, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E1_F, L2_to_L1D1, E1) {L2TagArrayRead, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E1_Es, L2_to_L1D0, Es) {L2TagArrayRead, L2DataArrayRead} {
- c0_copyL2ToL1;
- l0_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(E0_Es, L2_to_L1D1, Es) {L2TagArrayRead, L2DataArrayRead} {
- c1_copyL2ToL1;
- l1_loadDone;
- pt_popTriggerQueue;
- }
-
- transition(IF_E0S, L2_to_L1D0, I_E0S) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF_E1S, L2_to_L1D1, I_E1S) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF_ES, L2_to_L1D0, IF1_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF_ES, L2_to_L1D1, IF0_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF0_ES, L2_to_L1D0, I_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(IF1_ES, L2_to_L1D1, I_ES) {} {
- pt_popTriggerQueue;
- }
-
- transition(F_S0, L2_to_L1I, S0) {} {
- pt_popTriggerQueue;
- }
-
- transition(F_S1, L2_to_L1I, S1) {} {
- pt_popTriggerQueue;
- }
-
- transition({S_M0, O_M0}, NB_AckM, M0) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- xs0_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition({S_M1, O_M1}, NB_AckM, M1) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
- xs1_storeDone;
- sdv_sendDoneValid;
- uu_sendUnblock;
- pr_popResponseQueue;
- }
-
- transition(MO_I, NB_AckWB, I) {L2TagArrayWrite} {
- wb_data;
- ra_sendReplAck;
- sdi_sendDoneInvalid;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(ES_I, NB_AckWB, I) {L2TagArrayWrite} {
- wb_data;
- ra_sendReplAck;
- sdi_sendDoneInvalid;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(MO_S0, NB_AckWB, S0) {L2TagArrayWrite} {
- wb_data;
- i2_invL2;
- a2_allocateL2;
- sdv_sendDoneValid;
- nS_issueRdBlkS;
-    d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(MO_S1, NB_AckWB, S1) {L2TagArrayWrite} {
- wb_data;
- i2_invL2;
- a2_allocateL2;
- sdv_sendDoneValid;
- nS_issueRdBlkS;
-    d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- // Writeback cancel "ack"
- transition(I_C, NB_AckWB, I) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- sdi_sendDoneInvalid;
- d_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(S0_C, NB_AckWB, S0) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- sdv_sendDoneValid;
- pr_popResponseQueue;
- }
-
- transition(S1_C, NB_AckWB, S1) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- sdv_sendDoneValid;
- pr_popResponseQueue;
- }
-
- transition(S_C, NB_AckWB, S) {L2TagArrayWrite} {
- ss_sendStaleNotification;
- sdv_sendDoneValid;
- pr_popResponseQueue;
- }
-
- // Begin Probe Transitions
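-  // Probe handling: pi_* acks an invalidation without data, ph_* acks a
-  // hit without data, pd_*/pdt_* return dirty data from the cache or TBE,
-  // and pb_* back-probes whether the line is still resident in an L1.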
-
- transition({Ms, M0, M1, O}, {PrbInvData, PrbInvDataDemand}, I) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- i2_invL2;
- ib_invBothClusters;
- pp_popProbeQueue;
- }
-
- transition({Es, E0, E1, S, I}, {PrbInvData, PrbInvDataDemand}, I) {L2TagArrayRead, L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- ib_invBothClusters;
- ii_invIcache; // only relevant for S
- pp_popProbeQueue;
- }
-
- transition(S_C, {PrbInvData, PrbInvDataDemand}, I_C) {L2TagArrayWrite} {
- t_allocateTBE;
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(I_C, {PrbInvData, PrbInvDataDemand}, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- pp_popProbeQueue;
- }
-
- transition({Ms, M0, M1, O, Es, E0, E1, S, I}, PrbInv, I) {L2TagArrayRead, L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2; // nothing will happen in I
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(S_C, PrbInv, I_C) {L2TagArrayWrite} {
- t_allocateTBE;
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(I_C, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition({Ms, M0, M1, O}, {PrbShrData, PrbShrDataDemand}, O) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({Es, E0, E1, S}, {PrbShrData, PrbShrDataDemand}, S) {L2TagArrayRead, L2TagArrayWrite} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(S_C, {PrbShrData, PrbShrDataDemand}) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({I, I_C}, {PrbShrData, PrbShrDataDemand}) {L2TagArrayRead} {
- pb_sendProbeResponseBackprobe;
- pp_popProbeQueue;
- }
-
- transition({I_M0, I_E0S}, {PrbInv, PrbInvData, PrbInvDataDemand}) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters; // must invalidate current data (only relevant for I_M0)
- a0_allocateL1D; // but make sure there is room for incoming data when it arrives
- pp_popProbeQueue;
- }
-
- transition({I_M1, I_E1S}, {PrbInv, PrbInvData, PrbInvDataDemand}) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters; // must invalidate current data (only relevant for I_M1)
- a1_allocateL1D; // but make sure there is room for incoming data when it arrives
- pp_popProbeQueue;
- }
-
- transition({I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_ES}, {PrbInv, PrbInvData, PrbInvDataDemand, PrbShrData, PrbShrDataDemand}) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- a0_allocateL1D;
- a1_allocateL1D;
- pp_popProbeQueue;
- }
-
- transition({I_M0, I_E0S, I_M1, I_E1S}, {PrbShrData, PrbShrDataDemand}) {} {
- pb_sendProbeResponseBackprobe;
- pp_popProbeQueue;
- }
-
- transition(ES_I, {PrbInvData, PrbInvDataDemand}, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(MO_I, {PrbInvData, PrbInvDataDemand}, I_C) {} {
- pdt_sendProbeResponseDataFromTBE;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(MO_I, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(ES_I, PrbInv, I_C) {} {
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- pp_popProbeQueue;
- }
-
- transition(ES_I, {PrbShrData, PrbShrDataDemand}, ES_I) {} {
- ph_sendProbeResponseHit;
- s_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition(MO_I, {PrbShrData, PrbShrDataDemand}, MO_I) {} {
- pdt_sendProbeResponseDataFromTBE;
- s_setSharedFlip;
- pp_popProbeQueue;
- }
-
- transition(MO_S0, {PrbInvData, PrbInvDataDemand}, S0_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdt_sendProbeResponseDataFromTBE;
- i2_invL2;
- a2_allocateL2;
- nS_issueRdBlkS;
- d_deallocateTBE;
- pp_popProbeQueue;
- }
-
- transition(MO_S1, {PrbInvData, PrbInvDataDemand}, S1_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdt_sendProbeResponseDataFromTBE;
- i2_invL2;
- a2_allocateL2;
- nS_issueRdBlkS;
- d_deallocateTBE;
- pp_popProbeQueue;
- }
-
- transition(MO_S0, PrbInv, S0_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- a2_allocateL2;
- nS_issueRdBlkS;
- d_deallocateTBE;
- pp_popProbeQueue;
- }
-
- transition(MO_S1, PrbInv, S1_C) {L2TagArrayWrite} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- i2_invL2;
- a2_allocateL2;
- nS_issueRdBlkS;
- d_deallocateTBE;
- pp_popProbeQueue;
- }
-
- transition({MO_S0, MO_S1}, {PrbShrData, PrbShrDataDemand}) {} {
- pdt_sendProbeResponseDataFromTBE;
- s_setSharedFlip;
- pp_popProbeQueue;
- }
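-
- // The MO_S0/MO_S1 transitions above handle a race: an invalidating probe
- // lands while the line is being downgraded toward Shared. The probe is
- // answered out of the TBE (pdt_sendProbeResponseDataFromTBE) and the RdBlkS
- // is simply reissued (a2_allocateL2 + nS_issueRdBlkS) so the fill can be
- // refetched cleanly.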
-
- transition({S_F0, Es_F0, E0_F, E1_Es}, {PrbInvData, PrbInvDataDemand, PrbInv}, IF_E0S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- // invalidate everything you've got
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- // but make sure you have room for what you need from the fill
- a0_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({S_F1, Es_F1, E1_F, E0_Es}, {PrbInvData, PrbInvDataDemand, PrbInv}, IF_E1S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- // invalidate everything you've got
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- // but make sure you have room for what you need from the fill
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({S_F, Es_F}, {PrbInvData, PrbInvDataDemand, PrbInv}, IF_ES) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- // invalidate everything you've got
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- // but make sure you have room for what you need from the fill
- a0_allocateL1D;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition(Si_F0, {PrbInvData, PrbInvDataDemand, PrbInv}, F_S0) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition(Si_F1, {PrbInvData, PrbInvDataDemand, PrbInv}, F_S1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- nS_issueRdBlkS;
- pp_popProbeQueue;
- }
-
- transition({Es_F0, E0_F, E1_Es}, {PrbShrData, PrbShrDataDemand}, S_F0) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({Es_F1, E1_F, E0_Es}, {PrbShrData, PrbShrDataDemand}, S_F1) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(Es_F, {PrbShrData, PrbShrDataDemand}, S_F) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({S_F0, S_F1, S_F, Si_F0, Si_F1}, {PrbShrData, PrbShrDataDemand}) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition(S_M0, {PrbInvData, PrbInvDataDemand}, I_M0) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition(O_M0, {PrbInvData, PrbInvDataDemand}, I_M0) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdm_sendProbeResponseDataMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S_M0, O_M0}, {PrbInv}, I_M0) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition(S_M1, {PrbInvData, PrbInvDataDemand}, I_M1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition(O_M1, {PrbInvData, PrbInvDataDemand}, I_M1) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pdm_sendProbeResponseDataMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S_M1, O_M1}, {PrbInv}, I_M1) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pim_sendProbeResponseInvMs;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S0, S0_C}, {PrbInvData, PrbInvDataDemand, PrbInv}) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S1, S1_C}, {PrbInvData, PrbInvDataDemand, PrbInv}) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- ii_invIcache;
- i2_invL2;
- ai_allocateL1I;
- a2_allocateL2;
- pp_popProbeQueue;
- }
-
- transition({S_M0, S_M1}, {PrbShrData, PrbShrDataDemand}) {} {
- ph_sendProbeResponseHit;
- pp_popProbeQueue;
- }
-
- transition({O_M0, O_M1}, {PrbShrData, PrbShrDataDemand}) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({S0, S1, S0_C, S1_C}, {PrbShrData, PrbShrDataDemand}) {} {
- pb_sendProbeResponseBackprobe;
- pp_popProbeQueue;
- }
-
- transition({Ms_F0, M0_F, M1_Ms, O_F0}, {PrbInvData, PrbInvDataDemand}, IF_E0S) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F1, M1_F, M0_Ms, O_F1}, {PrbInvData, PrbInvDataDemand}, IF_E1S) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- ib_invBothClusters;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F, O_F}, {PrbInvData, PrbInvDataDemand}, IF_ES) {L2DataArrayRead} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pd_sendProbeResponseData;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F0, M0_F, M1_Ms, O_F0}, PrbInv, IF_E0S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F1, M1_F, M0_Ms, O_F1}, PrbInv, IF_E1S) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- i2_invL2;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F, O_F}, PrbInv, IF_ES) {} {
- forward_eviction_to_cpu0;
- forward_eviction_to_cpu1;
- pi_sendProbeResponseInv;
- ib_invBothClusters;
- i2_invL2;
- a0_allocateL1D;
- a1_allocateL1D;
- a2_allocateL2;
- n_issueRdBlk;
- pp_popProbeQueue;
- }
-
- transition({Ms_F0, M0_F, M1_Ms}, {PrbShrData, PrbShrDataDemand}, O_F0) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({Ms_F1, M1_F, M0_Ms}, {PrbShrData, PrbShrDataDemand}, O_F1) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({Ms_F}, {PrbShrData, PrbShrDataDemand}, O_F) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- transition({O_F0, O_F1, O_F}, {PrbShrData, PrbShrDataDemand}) {L2DataArrayRead} {
- pd_sendProbeResponseData;
- pp_popProbeQueue;
- }
-
- // END TRANSITIONS
-}
-
-
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:Directory, "AMD_Base-like protocol")
-: DirectoryMemory * directory;
- CacheMemory * L3CacheMemory;
- Cycles response_latency := 5;
- Cycles response_latency_regionDir := 1;
- Cycles l3_hit_latency := 30;
- bool useL3OnWT := "False";
- Cycles to_memory_controller_latency := 1;
-
- // From the Cores
- MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request";
- MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response";
- MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock";
-
- // To the Cores
- MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request";
- MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response";
-
- // From region buffer
- MessageBuffer * reqFromRegBuf, network="From", virtual_network="7", vnet_type="request";
-
- // To Region directory
- MessageBuffer * reqToRegDir, network="To", virtual_network="5", vnet_type="request";
- MessageBuffer * reqFromRegDir, network="From", virtual_network="5", vnet_type="request";
- MessageBuffer * unblockToRegDir, network="To", virtual_network="4", vnet_type="unblock";
-
- MessageBuffer * triggerQueue;
- MessageBuffer * L3triggerQueue;
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_U") {
- U, AccessPermission:Backing_Store, desc="unblocked";
- BR, AccessPermission:Backing_Store, desc="got CPU read request, blocked while sent to L3";
- BW, AccessPermission:Backing_Store, desc="got CPU write request, blocked while sent to L3";
- BL, AccessPermission:Busy, desc="got L3 WB request";
- // BL is Busy because the data may exist only in the in-flight WB
- // message: the L3 may already have sent it and moved on, possibly to
- // the I state.
- BI, AccessPermission:Backing_Store, desc="Blocked waiting for inv ack from core";
- BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
- BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B, AccessPermission:Backing_Store, desc="sent response, blocked until ack";
-
- // These are needed when a private request was issued before an inv was
- // received
- // for writebacks
- BS_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BM_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BP_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
- // for reads
- BS_Pm_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BM_Pm_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B_Pm_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BP_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
- }
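-
- // Reading the blocked states: the BS_/BM_/B_ prefixes appear to track the
- // request class (RdBlkS / RdBlkM / RdBlk), while the suffix records what is
- // still outstanding: _M memory only, _PM probes and memory, _Pm probes with
- // memory data already in hand; BP needs probes but no memory. Per the
- // comments above, the _BL and _B variants additionally remember a writeback
- // (BL) or a read that arrived while the private request was in flight.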
-
- // Events
- enumeration(Event, desc="Directory events") {
- // CPU requests
- RdBlkS, desc="...";
- RdBlkM, desc="...";
- RdBlk, desc="...";
- WriteThrough, desc="WriteThrough Message";
- Atomic, desc="Atomic Message";
-
- RdBlkSP, desc="...";
- RdBlkMP, desc="...";
- RdBlkP, desc="...";
- VicDirtyP, desc="...";
- VicCleanP, desc="...";
- WriteThroughP, desc="WriteThrough Message";
- AtomicP, desc="Atomic Message";
-
- // writebacks
- VicDirty, desc="...";
- VicClean, desc="...";
- CPUData, desc="WB data from CPU";
- StaleWB, desc="WB response for a no longer valid request";
-
- // probe responses
- CPUPrbResp, desc="Probe Response Msg";
- LastCPUPrbResp, desc="Last Probe Response Msg";
-
- ProbeAcksComplete, desc="Probe Acks Complete";
-
- L3Hit, desc="Hit in L3 return data to core";
-
- // Memory Controller
- MemData, desc="Fetched data from memory arrives";
- WBAck, desc="Writeback Ack from memory arrives";
-
- CoreUnblock, desc="Core received data, unblock";
- UnblockWriteThrough, desc="unblock, self triggered";
-
- StaleVicDirty, desc="Core invalidated before VicDirty processed";
- StaleVicDirtyP, desc="Core invalidated before VicDirty processed";
-
- // For region protocol
- CPUReq, desc="Generic CPU request";
- Inv, desc="Region dir needs a block invalidated";
- Downgrade, desc="Region dir needs a block downgraded";
-
- // For private accesses (bypassed reg-dir)
- CPUReadP, desc="Initial req from core, sent to L3";
- CPUWriteP, desc="Initial req from core, sent to L3";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- L3DataArrayRead, desc="Read the data array";
- L3DataArrayWrite, desc="Write the data array";
- L3TagArrayRead, desc="Read the data array";
- L3TagArrayWrite, desc="Write the data array";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore";
- }
-
- structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
- DataBlock DataBlk, desc="data for the block";
- MachineID LastSender, desc="Mach which this block came from";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- DataBlock DataBlkAux, desc="Auxiliary data for the block";
- bool Dirty, desc="Is the data dirty?";
- int NumPendingAcks, desc="num acks expected";
- MachineID OriginalRequestor, desc="Original Requestor";
- MachineID WTRequestor, desc="WT Requestor";
- bool Cached, desc="data hit in Cache";
- bool MemData, desc="Got MemData?",default="false";
- bool wtData, desc="Got write through data?",default="false";
- bool atomicData, desc="Got Atomic op?",default="false";
- Cycles InitialRequestTime, desc="...";
- Cycles ForwardRequestTime, desc="...";
- Cycles ProbeRequestStartTime, desc="...";
- bool DemandRequest, desc="for profiling";
- MachineID LastSender, desc="Mach which this block came from";
- bool L3Hit, default="false", desc="Was this an L3 hit?";
- bool TriggeredAcksComplete, default="false", desc="True if already triggered acks complete";
- WriteMask writeMask, desc="outstanding write through mask";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
-
- if (is_valid(dir_entry)) {
- //DPRINTF(RubySlicc, "Getting entry %s: %s\n", addr, dir_entry.DataBlk);
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
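-
- // getDirectoryEntry allocates on a miss, so callers (setState, getDataBlock,
- // the VicDirtyIgnore checks) can assume a valid entry comes back; the
- // directory is, in effect, backed by the full address space.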
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if (is_valid(tbe) && tbe.MemData) {
- DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
- return tbe.DataBlk;
- }
- DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
- return getDirectoryEntry(addr).DataBlk;
- }
-
- State getState(TBE tbe, CacheEntry entry, Addr addr) {
- return getDirectoryEntry(addr).DirectoryState;
- }
-
- State getStateFromAddr(Addr addr) {
- return getDirectoryEntry(addr).DirectoryState;
- }
-
- void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
- getDirectoryEntry(addr).DirectoryState := state;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- // For this Directory, all permissions are just tracked in Directory, since
- // it's not possible to have something in TBE but not Dir, just keep track
- // of state all in one place.
- if(directory.isPresent(addr)) {
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- void setAccessPermission(CacheEntry entry, Addr addr, State state) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L3DataArrayRead) {
- L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L3DataArrayWrite) {
- L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L3TagArrayRead) {
- L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L3TagArrayWrite) {
- L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L3DataArrayRead) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L3DataArrayWrite) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L3TagArrayRead) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L3TagArrayWrite) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
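-
- // The RequestType values above back the resource annotations on
- // transitions: SLICC checks checkResourceAvailable for each listed type
- // before a transition may fire, and calls recordRequestType when it does.
- // A hypothetical example of the shape (not a transition from this machine):
- //
- // transition(U, RdBlkS, BS_PM) {L3TagArrayRead, L3TagArrayWrite} {
- // ...
- // }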
-
- // ** OUT_PORTS **
- out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
- out_port(responseNetwork_out, ResponseMsg, responseToCore);
-
- out_port(requestNetworkReg_out, CPURequestMsg, reqToRegDir);
- out_port(regAckNetwork_out, UnblockMsg, unblockToRegDir);
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
-
- // ** IN_PORTS **
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=7) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == TriggerType:AcksComplete) {
- trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
- trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
- } else {
- error("Unknown trigger msg");
- }
- }
- }
- }
-
- in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=6) {
- if (L3TriggerQueue_in.isReady(clockEdge())) {
- peek(L3TriggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == TriggerType:L3Hit) {
- trigger(Event:L3Hit, in_msg.addr, entry, tbe);
- } else {
- error("Unknown trigger msg");
- }
- }
- }
- }
-
- // Unblock Network
- in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=5) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, UnblockMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
- }
- }
- }
-
- // Core response network
- in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=4) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- DPRINTF(RubySlicc, "core responses %s\n", in_msg);
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
- if (is_valid(tbe) && tbe.NumPendingAcks == 1
- && tbe.TriggeredAcksComplete == false) {
- trigger(Event:LastCPUPrbResp, in_msg.addr, entry, tbe);
- } else {
- trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:CPUData) {
- trigger(Event:CPUData, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
- trigger(Event:StaleWB, in_msg.addr, entry, tbe);
- } else {
- error("Unexpected response type");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=3) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:MemData, in_msg.addr, entry, tbe);
- DPRINTF(RubySlicc, "%s\n", in_msg);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(regBuf_in, CPURequestMsg, reqFromRegBuf, rank=2) {
- if (regBuf_in.isReady(clockEdge())) {
- peek(regBuf_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == CoherenceRequestType:ForceInv) {
- trigger(Event:Inv, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:ForceDowngrade) {
- trigger(Event:Downgrade, in_msg.addr, entry, tbe);
- } else {
- error("Bad request from region buffer");
- }
- }
- }
- }
-
- in_port(regDir_in, CPURequestMsg, reqFromRegDir, rank=1) {
- if (regDir_in.isReady(clockEdge())) {
- peek(regDir_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:Atomic) {
- trigger(Event:Atomic, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
- } else {
- trigger(Event:VicDirty, in_msg.addr, entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
- } else {
- trigger(Event:VicClean, in_msg.addr, entry, tbe);
- }
- } else {
- error("Bad message type fwded from Region Dir");
- }
- }
- }
- }
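-
- // VicDirtyIgnore (see the Entry above) appears to filter stale writebacks:
- // once a core has been invalidated before its VicDirty is processed, that
- // core's in-flight VicDirty/VicClean is matched here (and in the private
- // path below) and demoted to StaleVicDirty(P) instead of being treated as
- // a live writeback.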
-
- in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Private) {
- // Bypass the region dir
- if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlkP, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkSP, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkMP, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:Atomic) {
- trigger(Event:AtomicP, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- trigger(Event:WriteThroughP, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicDirtyP for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirtyP, in_msg.addr, entry, tbe);
- } else {
- DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
- trigger(Event:VicDirtyP, in_msg.addr, entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicCleanP for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirtyP, in_msg.addr, entry, tbe);
- } else {
- DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
- trigger(Event:VicCleanP, in_msg.addr, entry, tbe);
- }
- } else {
- error("Bad message type for private access");
- }
- } else {
- trigger(Event:CPUReq, in_msg.addr, entry, tbe);
- }
- }
- }
- }
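-
- // in_port rank orders the wakeup scan (highest first): triggers (rank 7/6)
- // drain before unblocks and responses, which drain before region and core
- // requests (rank 0), so transactions already in flight make progress ahead
- // of new work.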
-
- // Actions
- action(s_sendResponseS, "s", desc="send Shared response") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := tbe.DemandRequest;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Cached) {
- out_msg.State := CoherenceState:Shared;
- } else {
- out_msg.State := CoherenceState:Exclusive;
- }
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := tbe.DemandRequest;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(m_sendResponseM, "m", desc="send Modified response") {
- if (tbe.wtData) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- } else {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := false;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := tbe.DemandRequest;
- out_msg.L3Hit := tbe.L3Hit;
- if (tbe.atomicData) {
- out_msg.WTRequestor := tbe.WTRequestor;
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- if (tbe.atomicData) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- }
- }
- }
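-
- // Write-throughs are acked separately (presumably via the NBSysWBAck
- // actions w/wp below) rather than with a data response, so for wtData this
- // action only self-triggers UnblockWriteThrough to release the block;
- // atomics send the data response first and then self-trigger the same way.
- // The trigger port turns this into Event:UnblockWriteThrough.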
-
- action(sb_sendResponseSBypass, "sb", desc="send Shared response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := false;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(esb_sendResponseESBypass, "esb", desc="send Exclusive or Shared response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Cached || in_msg.ForceShared) {
- out_msg.State := CoherenceState:Shared;
- } else {
- out_msg.State := CoherenceState:Exclusive;
- }
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := false;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(mbwt_sendResponseWriteThroughBypass, "mbwt", desc="send write through response") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.DemandRequest := false;
- }
- } else {
- assert(in_msg.Type == CoherenceRequestType:Atomic);
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := false;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := false;
- out_msg.L3Hit := tbe.L3Hit;
- out_msg.WTRequestor := in_msg.WTRequestor;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- }
- }
-
- action(mb_sendResponseMBypass, "mb", desc="send Modified response") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := false;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.DemandRequest := false;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := true;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.DemandRequest := tbe.DemandRequest;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(cp_sendResponseCtoDP, "cp", desc="send CtoD Ack") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := true;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.DemandRequest := false;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(regDir_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.DemandRequest := false;
- }
- }
- }
-
- action(wp_sendResponseWBAckP, "wp", desc="send WB Ack") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- out_msg.DemandRequest := false;
- }
- }
- }
-
- action(wc_sendResponseWBAck, "wc", desc="send WB Ack for cancel") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Sender);
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(ra_ackRegionDir, "ra", desc="Ack region dir") {
- peek(regDir_in, CPURequestMsg) {
- if (in_msg.NoAckNeeded == false) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency_regionDir) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DirReadyAck;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
- }
-
- action(l_queueMemRdReq, "lr", desc="Read data from memory") {
- peek(regDir_in, CPURequestMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- tbe.DataBlk := entry.DataBlk;
- tbe.LastSender := entry.LastSender;
- tbe.L3Hit := true;
- tbe.MemData := true;
- DPRINTF(RubySlicc, "L3 data is %s\n", entry.DataBlk);
- L3CacheMemory.deallocate(address);
- } else {
- queueMemoryRead(machineID, address, to_memory_controller_latency);
- }
- }
- }
-
- action(lrp_queueMemRdReqP, "lrp", desc="Read data from memory") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- tbe.DataBlk := entry.DataBlk;
- tbe.LastSender := entry.LastSender;
- tbe.L3Hit := true;
- tbe.MemData := true;
- DPRINTF(RubySlicc, "L3 data is %s\n", entry.DataBlk);
- L3CacheMemory.deallocate(address);
- } else {
- queueMemoryRead(machineID, address, to_memory_controller_latency);
- }
- }
- }
-
- action(dcr_probeInvCoreData, "dcr", desc="probe inv cores, return data") {
- peek(regBuf_in, CPURequestMsg) {
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination := in_msg.Sharers;
- tbe.NumPendingAcks := tbe.NumPendingAcks + in_msg.Sharers.count();
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" dcr: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(ddr_probeDownCoreData, "ddr", desc="probe inv cores, return data") {
- peek(regBuf_in, CPURequestMsg) {
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination := in_msg.Sharers;
- tbe.NumPendingAcks := tbe.NumPendingAcks + in_msg.Sharers.count();
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" dcr: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
- peek(requestNetwork_in, CPURequestMsg) { // not the right network?
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
- tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:CorePair) - 1;
- out_msg.Destination.broadcast(MachineType:TCP);
- tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:TCP);
- out_msg.Destination.broadcast(MachineType:SQC);
- tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:SQC);
- out_msg.Destination.remove(in_msg.Requestor);
- DPRINTF(RubySlicc, "%s\n", (out_msg));
- APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
- peek(requestNetwork_in, CPURequestMsg) { // not the right network?
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := false;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
- tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:CorePair) - 1;
- out_msg.Destination.broadcast(MachineType:TCP);
- tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:TCP);
- out_msg.Destination.broadcast(MachineType:SQC);
- tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:SQC);
- out_msg.Destination.remove(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
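-
- // Ack accounting for the broadcast probes above: one response is expected
- // from every CorePair except (apparently) the requesting one, hence the
- // "- 1", plus every TCP and SQC; the requestor is then removed from the
- // destination set so it never sees its own probe.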
-
- action(d_writeDataToMemory, "d", desc="Write data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Writing Data: %s to address %s\n", in_msg.DataBlk,
- in_msg.addr);
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- peek(regDir_in, CPURequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.wtData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- if (in_msg.Type == CoherenceRequestType:Atomic) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.atomicData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
- tbe.Dirty := false;
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
- tbe.Dirty := false;
- }
- tbe.OriginalRequestor := in_msg.Requestor;
- tbe.NumPendingAcks := 0;
- tbe.Cached := in_msg.ForceShared;
- tbe.InitialRequestTime := in_msg.InitialRequestTime;
- tbe.ForwardRequestTime := curCycle();
- tbe.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- tbe.DemandRequest := in_msg.DemandRequest;
- }
- }
-
- action(tp_allocateTBEP, "tp", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- peek(requestNetwork_in, CPURequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.wtData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- if (in_msg.Type == CoherenceRequestType:Atomic) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.atomicData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
- tbe.Dirty := false;
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
- tbe.Dirty := false;
- }
- tbe.OriginalRequestor := in_msg.Requestor;
- tbe.NumPendingAcks := 0;
- tbe.Cached := in_msg.ForceShared;
- tbe.InitialRequestTime := in_msg.InitialRequestTime;
- tbe.ForwardRequestTime := curCycle();
- tbe.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
- tbe.DemandRequest := false;
- }
- }
-
- action(sa_setAcks, "sa", desc="setAcks") {
- peek(regDir_in, CPURequestMsg) {
- tbe.NumPendingAcks := in_msg.Acks;
- APPEND_TRANSITION_COMMENT(" waiting for acks ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
- }
-
- action(tr_allocateTBE, "tr", desc="allocate TBE Entry for Region inv") {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.NumPendingAcks := 0;
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(wdp_writeBackDataPrivate, "wdp", desc="Write back data if needed") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.DataBlkAux := getDirectoryEntry(address).DataBlk;
- tbe.DataBlkAux.copyPartial(in_msg.DataBlk,in_msg.writeMask);
- getDirectoryEntry(address).DataBlk := tbe.DataBlkAux;
- } else {
- assert(in_msg.Type == CoherenceRequestType:Atomic);
- tbe.DataBlkAux.atomicPartial(getDirectoryEntry(address).DataBlk,in_msg.writeMask);
- getDirectoryEntry(address).DataBlk := tbe.DataBlkAux;
- }
- }
- }
-
- action(wd_writeBackData, "wd", desc="Write back data if needed") {
- if (tbe.wtData) {
- DataBlock tmp := getDirectoryEntry(address).DataBlk;
- tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
- tbe.DataBlk := tmp;
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- } else if (tbe.atomicData) {
- tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask);
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- } else if (tbe.Dirty == true) {
- APPEND_TRANSITION_COMMENT(" Wrote data back ");
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- }
- }
-
- action(wdi_writeBackDataInv, "wdi", desc="Write back inv data if needed") {
- // Unlike wd_writeBackData above, only dirty data is written back here.
- if (tbe.Dirty == true) {
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- APPEND_TRANSITION_COMMENT("Writing dirty data to dir");
- DPRINTF(RubySlicc, "Data %s: %s\n", address, tbe.DataBlk);
- } else {
- APPEND_TRANSITION_COMMENT("NOT!!! Writing dirty data to dir");
- }
- }
-
- action(wdt_writeBackDataInvNoTBE, "wdt", desc="Write back inv data if needed no TBE") {
- // Same as wdi_writeBackDataInv, but the dirty data comes from the message.
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty == true) {
- getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
- APPEND_TRANSITION_COMMENT("Writing dirty data to dir");
- DPRINTF(RubySlicc, "Data %s: %s\n", address, in_msg.DataBlk);
- } else {
- APPEND_TRANSITION_COMMENT("NOT!!! Writing dirty data to dir");
- }
- }
- }
-
- action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
- peek(memQueue_in, MemoryMsg) {
- if (tbe.Dirty == false) {
- tbe.DataBlk := getDirectoryEntry(address).DataBlk;
- }
- tbe.MemData := true;
- }
- }
-
- action(ml_writeL3DataToTBE, "ml", desc="write L3 data to TBE") {
- assert(tbe.Dirty == false);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- tbe.DataBlk := entry.DataBlk;
- tbe.LastSender := entry.LastSender;
- tbe.L3Hit := true;
- tbe.MemData := true;
- }
-
- action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
- DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
- if (tbe.wtData) {
- DataBlock tmp := in_msg.DataBlk;
- tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
- tbe.DataBlk := tmp;
- } else if (tbe.Dirty) {
- if(tbe.atomicData == false && tbe.wtData == false) {
- DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
- assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
- }
- } else {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- tbe.LastSender := in_msg.Sender;
- }
- }
- if (in_msg.Hit) {
- tbe.Cached := true;
- }
- }
- }
-
- action(yc_writeCPUDataToTBE, "yc", desc="write CPU Data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
- DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
- if (tbe.Dirty) {
- DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
- assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
- }
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := false;
- tbe.LastSender := in_msg.Sender;
- }
- }
- }
-
- action(x_decrementAcks, "x", desc="decrement Acks pending") {
- if (tbe.NumPendingAcks > 0) {
- tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
- } else {
- APPEND_TRANSITION_COMMENT(" Double ack! ");
- }
- assert(tbe.NumPendingAcks >= 0);
- APPEND_TRANSITION_COMMENT(" Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(o_checkForCompletion, "o", desc="check for ack completion") {
- if (tbe.NumPendingAcks == 0 && tbe.TriggeredAcksComplete == false) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- tbe.TriggeredAcksComplete := true;
- }
- APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(ont_checkForCompletionNoTrigger, "ont", desc="check for ack completion, no trigger") {
- if (tbe.NumPendingAcks == 0 && tbe.TriggeredAcksComplete == false) {
- tbe.TriggeredAcksComplete := true;
- }
- APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
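-
- // Ack completion protocol: the probe senders (sc/ic/dcr/ddr) raise
- // NumPendingAcks, x_decrementAcks counts CPUPrbResp messages back down
- // (the response port fires LastCPUPrbResp when one ack remains), and
- // o_checkForCompletion turns the zero crossing into a single
- // TriggerType:AcksComplete; TriggeredAcksComplete guards against firing
- // twice. ont_checkForCompletionNoTrigger records completion without the
- // trigger.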
-
- action(rvp_removeVicDirtyIgnore, "rvp", desc="Remove ignored core") {
- peek(requestNetwork_in, CPURequestMsg) {
- getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
- }
- }
-
- action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
- peek(regDir_in, CPURequestMsg) {
- getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
- }
- }
-
- action(r_sendRequestToRegionDir, "r", desc="send request to Region Directory") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetworkReg_out, CPURequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "out dest: %s\n", mapAddressToMachine(address, MachineType:RegionDir));
- }
- }
- }
-
- action(ai_ackInvalidate, "ai", desc="Ack to let the reg-dir know that the inv is ordered") {
- peek(regBuf_in, CPURequestMsg) {
- enqueue(regAckNetwork_out, UnblockMsg, 1) {
- out_msg.addr := address;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "ai out_msg: %s\n", out_msg);
- }
- }
- }
-
- action(aic_ackInvalidate, "aic", desc="Ack to let the reg-dir know that the inv is ordered") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.NoAckNeeded == false) {
- enqueue(regAckNetwork_out, UnblockMsg, 1) {
- out_msg.addr := address;
- if (machineIDToMachineType(in_msg.Sender) == MachineType:CorePair) {
- out_msg.Destination.add(createMachineID(MachineType:RegionBuffer, intToID(0)));
- } else {
- out_msg.Destination.add(createMachineID(MachineType:RegionBuffer, intToID(1)));
- }
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "ai out_msg: %s\n", out_msg);
- out_msg.wasValid := in_msg.isValid;
- }
- }
- }
- }
-
- action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
- peek(responseNetwork_in, ResponseMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- }
- }
- }
-
- action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
- if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- }
- }
- }
-
- action(ali_allocateL3Block, "ali", desc="allocate the L3 block on ForceInv") {
- if (tbe.Dirty == true) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- }
- }
- }
-
- action(ali_allocateL3BlockNoTBE, "alt", desc="allocate the L3 block on ForceInv no TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" ali wrote data to L3 (hit) ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" ali wrote data to L3 ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- }
- }
- }
- }
-
- action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
- L3CacheMemory.deallocate(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(prd_popRegionQueue, "prd", desc="pop region dir request queue") {
- regDir_in.dequeue(clockEdge());
- }
-
- action(prb_popRegionBufQueue, "prb", desc="pop region buffer request queue") {
- regBuf_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(pm_popMemQueue, "pm", desc="pop mem queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
- L3TriggerQueue_in.dequeue(clockEdge());
- }
-
- action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
- responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(ww_stallAndWaitRegRequestQueue, "ww", desc="stall and wait on the region dir request queue") {
- stall_and_wait(regDir_in, address);
- }
-
- action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
- stall_and_wait(requestNetwork_in, address);
- }
-
- action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
- wakeUpBuffers(address);
- }
-
- action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
- wakeUpAllBuffers();
- }
-
- action(z_stall, "z", desc="stall; do nothing") {
- }
-
- // TRANSITIONS
-
- // transitions from U
-
- transition({BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Inv, Downgrade}) {
- ww_stallAndWaitRegRequestQueue;
- }
-
- transition(U, Inv, BI){L3TagArrayRead} {
- tr_allocateTBE;
- dcr_probeInvCoreData; // only need to invalidate sharers
- ai_ackInvalidate;
- prb_popRegionBufQueue;
- }
-
- transition(U, Downgrade, BI){L3TagArrayRead} {
- tr_allocateTBE;
- ddr_probeDownCoreData; // only need to invalidate sharers
- ai_ackInvalidate;
- prb_popRegionBufQueue;
- }
-
- // The next 2 transitions are needed in the event that an invalidation
- // is waiting for its ack from the core, but the event makes it through
- // the region directory before the acks. This wouldn't be needed if
- // we waited to ack the region dir until the directory got all the acks
- transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, WriteThrough, Atomic}) {
- ww_stallAndWaitRegRequestQueue;
- }
-
- transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {RdBlkSP, RdBlkMP, RdBlkP}) {
- st_stallAndWaitRequest;
- }
-
- transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {WriteThroughP,AtomicP}) {
- st_stallAndWaitRequest;
- }
-
- transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} {
- t_allocateTBE;
- l_queueMemRdReq;
- sa_setAcks;
- o_checkForCompletion;
- ra_ackRegionDir;
- prd_popRegionQueue;
- }
-
- transition(U, WriteThrough, BM_PM){L3TagArrayRead} {
- t_allocateTBE;
- w_sendResponseWBAck;
- l_queueMemRdReq;
- sa_setAcks;
- o_checkForCompletion;
- ra_ackRegionDir;
- prd_popRegionQueue;
- }
-
- transition(U, {RdBlkM,Atomic}, BM_PM){L3TagArrayRead} {
- t_allocateTBE;
- l_queueMemRdReq;
- sa_setAcks;
- o_checkForCompletion;
- ra_ackRegionDir;
- prd_popRegionQueue;
- }
-
- transition(U, RdBlk, B_PM){L3TagArrayRead} {
- t_allocateTBE;
- l_queueMemRdReq;
- sa_setAcks;
- o_checkForCompletion;
- ra_ackRegionDir;
- prd_popRegionQueue;
- }
-
- transition(U, {RdBlkSP}, BS_M) {L3TagArrayRead} {
- tp_allocateTBEP;
- lrp_queueMemRdReqP;
- p_popRequestQueue;
- }
-
- transition(U, WriteThroughP, BM_M) {L3TagArrayRead} {
- tp_allocateTBEP;
- wp_sendResponseWBAckP;
- lrp_queueMemRdReqP;
- p_popRequestQueue;
- }
-
- transition(U, {RdBlkMP,AtomicP}, BM_M) {L3TagArrayRead} {
- tp_allocateTBEP;
- lrp_queueMemRdReqP;
- p_popRequestQueue;
- }
-
- transition(U, RdBlkP, B_M) {L3TagArrayRead} {
- tp_allocateTBEP;
- lrp_queueMemRdReqP;
- p_popRequestQueue;
- }
-
- transition(U, VicDirtyP, BL) {L3TagArrayRead} {
- tp_allocateTBEP;
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(U, VicCleanP, BL) {L3TagArrayRead} {
- tp_allocateTBEP;
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BM_Pm, RdBlkSP, BM_Pm_B) {L3DataArrayWrite} {
- sb_sendResponseSBypass;
- p_popRequestQueue;
- }
-
- transition(BS_Pm, RdBlkSP, BS_Pm_B) {L3DataArrayWrite} {
- sb_sendResponseSBypass;
- p_popRequestQueue;
- }
-
- transition(B_Pm, RdBlkSP, B_Pm_B) {L3DataArrayWrite} {
- sb_sendResponseSBypass;
- p_popRequestQueue;
- }
-
- transition(BP, RdBlkSP, BP_B) {L3DataArrayWrite} {
- sb_sendResponseSBypass;
- p_popRequestQueue;
- }
-
- transition(BM_Pm, RdBlkMP, BM_Pm_B) {L3DataArrayWrite} {
- mb_sendResponseMBypass;
- p_popRequestQueue;
- }
-
- transition(BS_Pm, RdBlkMP, BS_Pm_B) {L3DataArrayWrite} {
- mb_sendResponseMBypass;
- p_popRequestQueue;
- }
-
- transition(B_Pm, RdBlkMP, B_Pm_B) {L3DataArrayWrite} {
- mb_sendResponseMBypass;
- p_popRequestQueue;
- }
-
- transition(BP, RdBlkMP, BP_B) {L3DataArrayWrite} {
- mb_sendResponseMBypass;
- p_popRequestQueue;
- }
-
- transition(BM_Pm, {WriteThroughP,AtomicP}, BM_Pm_B) {L3DataArrayWrite} {
- wdp_writeBackDataPrivate;
- mbwt_sendResponseWriteThroughBypass;
- p_popRequestQueue;
- }
-
- transition(BS_Pm, {WriteThroughP,AtomicP}, BS_Pm_B) {L3DataArrayWrite} {
- wdp_writeBackDataPrivate;
- mbwt_sendResponseWriteThroughBypass;
- p_popRequestQueue;
- }
-
- transition(B_Pm, {WriteThroughP,AtomicP}, B_Pm_B) {L3DataArrayWrite} {
- wdp_writeBackDataPrivate;
- mbwt_sendResponseWriteThroughBypass;
- p_popRequestQueue;
- }
-
- transition(BP, {WriteThroughP,AtomicP}, BP_B) {L3DataArrayWrite} {
- wdp_writeBackDataPrivate;
- mbwt_sendResponseWriteThroughBypass;
- p_popRequestQueue;
- }
-
- transition(BM_Pm, RdBlkP, BM_Pm_B) {L3DataArrayWrite} {
- esb_sendResponseESBypass;
- p_popRequestQueue;
- }
-
- transition(BS_Pm, RdBlkP, BS_Pm_B) {L3DataArrayWrite} {
- esb_sendResponseESBypass;
- p_popRequestQueue;
- }
-
- transition(B_Pm, RdBlkP, B_Pm_B) {L3DataArrayWrite}{
- esb_sendResponseESBypass;
- p_popRequestQueue;
- }
-
- transition(BP, RdBlkP, BP_B) {L3DataArrayWrite}{
- esb_sendResponseESBypass;
- p_popRequestQueue;
- }
-
- transition(BM_Pm_B, CoreUnblock, BM_Pm) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition(BS_Pm_B, CoreUnblock, BS_Pm) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition(B_Pm_B, CoreUnblock, B_Pm) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition(BP_B, CoreUnblock, BP) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition(BM_Pm_B, UnblockWriteThrough, BM_Pm) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(BS_Pm_B, UnblockWriteThrough, BS_Pm) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(B_Pm_B, UnblockWriteThrough, B_Pm) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(BP_B, UnblockWriteThrough, BP) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(BM_Pm, VicDirtyP, BM_Pm_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BS_Pm, VicDirtyP, BS_Pm_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(B_Pm, VicDirtyP, B_Pm_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BP, VicDirtyP, BP_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BM_Pm, VicCleanP, BM_Pm_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BS_Pm, VicCleanP, BS_Pm_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(B_Pm, VicCleanP, B_Pm_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BP, VicCleanP, BP_BL) {
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition(BM_Pm_BL, CPUData, BM_Pm) {
- yc_writeCPUDataToTBE;
- d_writeDataToMemory;
- wa_wakeUpDependents;
- pr_popResponseQueue;
- }
-
- transition(BS_Pm_BL, CPUData, BS_Pm) {
- yc_writeCPUDataToTBE;
- d_writeDataToMemory;
- wa_wakeUpDependents;
- pr_popResponseQueue;
- }
-
- transition(B_Pm_BL, CPUData, B_Pm) {
- yc_writeCPUDataToTBE;
- d_writeDataToMemory;
- wa_wakeUpDependents;
- pr_popResponseQueue;
- }
-
- transition(BP_BL, CPUData, BP) {
- yc_writeCPUDataToTBE;
- d_writeDataToMemory;
- wa_wakeUpDependents;
- pr_popResponseQueue;
- }
-
- transition({BR, BW, BL}, {VicDirtyP, VicCleanP}) {
- st_stallAndWaitRequest;
- }
-
- transition({BR, BW, BL}, {VicDirty, VicClean}) {
- ww_stallAndWaitRegRequestQueue;
- }
-
- transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
- dt_deallocateTBE;
- d_writeDataToMemory;
- al_allocateL3Block;
- wa_wakeUpDependents;
- pr_popResponseQueue;
- }
-
- transition(BL, StaleWB, U) {L3TagArrayWrite} {
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pr_popResponseQueue;
- }
-
- transition({BI, B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {VicDirty, VicClean}) {
- ww_stallAndWaitRegRequestQueue;
- }
-
- transition({BI, B, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {VicDirtyP, VicCleanP}) {
- st_stallAndWaitRequest;
- }
-
- transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, WBAck) {
- pm_popMemQueue;
- }
-
- transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, StaleVicDirtyP) {
- rvp_removeVicDirtyIgnore;
- wp_sendResponseWBAckP;
- p_popRequestQueue;
- }
-
- transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, StaleVicDirty) {
- rv_removeVicDirtyIgnore;
- w_sendResponseWBAck;
- prd_popRegionQueue;
- }
-
- transition(U, VicDirty, BL) {L3TagArrayRead} {
- t_allocateTBE;
- ra_ackRegionDir;
- w_sendResponseWBAck;
- prd_popRegionQueue;
- }
-
- transition(U, VicClean, BL) {L3TagArrayRead} {
- t_allocateTBE;
- ra_ackRegionDir;
- w_sendResponseWBAck;
- prd_popRegionQueue;
- }
-
- transition({B, BR}, CoreUnblock, U) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition({B, BR}, UnblockWriteThrough, U) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(BS_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(BS_PM, MemData, BS_Pm) {} {
- mt_writeMemDataToTBE;
- wa_wakeUpDependents;
- pm_popMemQueue;
- }
-
- transition(BM_PM, MemData, BM_Pm){} {
- mt_writeMemDataToTBE;
- wa_wakeUpDependents;
- pm_popMemQueue;
- }
-
- transition(B_PM, MemData, B_Pm){} {
- mt_writeMemDataToTBE;
- wa_wakeUpDependents;
- pm_popMemQueue;
- }
-
- transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(BM_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(B_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(BS_PM, L3Hit, BS_Pm) {
- wa_wakeUpDependents;
- ptl_popTriggerQueue;
- }
-
- transition(BM_PM, L3Hit, BM_Pm) {
- wa_wakeUpDependents;
- ptl_popTriggerQueue;
- }
-
- transition(B_PM, L3Hit, B_Pm) {
- wa_wakeUpDependents;
- ptl_popTriggerQueue;
- }
-
- transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BP, BI}, CPUPrbResp) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- pr_popResponseQueue;
- }
-
- transition({B, B_M, BS_M, BM_M}, {CPUPrbResp, LastCPUPrbResp}) {
- z_stall;
- }
-
- transition({BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {CPUPrbResp, LastCPUPrbResp}) {
- // recycling because PrbResponse and data come on the same network
- yy_recycleResponseQueue;
- }
-
- transition(U, {CPUPrbResp, LastCPUPrbResp}) {L3TagArrayRead, L3DataArrayWrite} {
- aic_ackInvalidate;
- wdt_writeBackDataInvNoTBE;
- ali_allocateL3BlockNoTBE;
- pr_popResponseQueue;
- }
-
- transition(BL, {CPUPrbResp, LastCPUPrbResp}) {} {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- wdi_writeBackDataInv;
- ali_allocateL3Block;
- pr_popResponseQueue;
- }
-
- transition(BS_PM, LastCPUPrbResp, BS_M) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- pr_popResponseQueue;
- }
-
- transition(BS_PM, ProbeAcksComplete, BS_M) {} {
- pt_popTriggerQueue;
- }
-
- transition(BM_PM, LastCPUPrbResp, BM_M) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- pr_popResponseQueue;
- }
-
- transition(BM_PM, ProbeAcksComplete, BM_M) {} {
- pt_popTriggerQueue;
- }
-
- transition(B_PM, LastCPUPrbResp, B_M) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- pr_popResponseQueue;
- }
-
- transition(B_PM, ProbeAcksComplete, B_M){} {
- pt_popTriggerQueue;
- }
-
- transition(BS_Pm, LastCPUPrbResp, B) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BM_Pm, LastCPUPrbResp, B) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(B_Pm, LastCPUPrbResp, B) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BP, LastCPUPrbResp, B) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- c_sendResponseCtoD;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(BP, ProbeAcksComplete, B){L3TagArrayWrite, L3DataArrayWrite} {
- c_sendResponseCtoD;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BI, LastCPUPrbResp, B) {
- aic_ackInvalidate;
- y_writeProbeDataToTBE;
- x_decrementAcks;
- ont_checkForCompletionNoTrigger;
- wa_wakeUpDependents;
- wdi_writeBackDataInv;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(BI, ProbeAcksComplete, U) {L3TagArrayWrite, L3DataArrayWrite}{
- wa_wakeUpDependents;
- wdi_writeBackDataInv;
- ali_allocateL3Block;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-enumeration(CoherenceRequestType, desc="Coherence Request Types") {
- // CPU Request Types ONLY
- RdBlk, desc="Read Blk";
- RdBlkM, desc="Read Blk Modified";
- RdBlkS, desc="Read Blk Shared";
- VicClean, desc="L2 clean eviction";
- VicDirty, desc="L2 dirty eviction";
-
- WrCancel, desc="want to cancel WB to Memory"; // should this be here?
-
- WBApproval, desc="WB Approval";
-
- // Messages between Dir and R-Dir
- ForceInv, desc="Send invalidate to the block";
- ForceDowngrade, desc="Send downgrade to the block";
- Unblock, desc="Used to let the dir know a message has been sunk";
-
- // Messages between R-Dir and R-Buffer
- PrivateNotify, desc="Let region buffer know it has private access";
- SharedNotify, desc="Let region buffer know it has shared access";
- WbNotify, desc="Let region buffer know it saw its wb request";
- Downgrade, desc="Force the region buffer to downgrade to shared";
- // Response to R-Dir (probably should be on a different network, but
- // I need it to be ordered with respect to requests)
- InvAck, desc="Let the R-Dir know when the inv has occurred";
-
- PrivateRequest, desc="R-buf wants the region in private";
- UpgradeRequest, desc="R-buf wants to upgrade the region from shared to private";
- SharedRequest, desc="R-buf wants the region in shared (could respond with private)";
- CleanWbRequest, desc="R-buf wants to deallocate clean region";
-
- NA, desc="So we don't get segfaults";
-}
-
-enumeration(ProbeRequestType, desc="Probe Request Types") {
- PrbDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
- PrbInv, desc="Probe to Invalidate";
-
- // For regions
- PrbRepl, desc="Force the cache to do a replacement";
- PrbRegDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
-}
-
-
-enumeration(CoherenceResponseType, desc="Coherence Response Types") {
- NBSysResp, desc="Northbridge response to CPU Rd request";
- NBSysWBAck, desc="Northbridge response ok to WB";
- TDSysResp, desc="TCCdirectory response to CPU Rd request";
- TDSysWBAck, desc="TCCdirectory response ok to WB";
- TDSysWBNack, desc="TCCdirectory response ok to drop";
- CPUPrbResp, desc="CPU Probe Response";
- CPUData, desc="CPU Data";
- StaleNotif, desc="Notification of Stale WBAck, No data to writeback";
- CPUCancelWB, desc="want to cancel WB to Memory";
- MemData, desc="Data from Memory";
-
- // for regions
- PrivateAck, desc="Ack that r-buf received private notify";
- RegionWbAck, desc="Writeback Ack that r-buf completed deallocation";
- DirReadyAck, desc="Directory (mem ctrl)<->region dir handshake";
-}
-
-enumeration(CoherenceState, default="CoherenceState_NA", desc="Coherence State") {
- Modified, desc="Modified";
- Owned, desc="Owned state";
- Exclusive, desc="Exclusive";
- Shared, desc="Shared";
- NA, desc="NA";
-}
-
-structure(CPURequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- Addr DemandAddress, desc="Physical block address for this request";
- CoherenceRequestType Type, desc="Type of request";
- DataBlock DataBlk, desc="data for the cache line"; // only for WB
- bool Dirty, desc="whether WB data is dirty"; // only for WB
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Multicast destination mask";
- bool Shared, desc="For CPU_WrVicBlk, vic is O not M. For CPU_ClVicBlk, vic is S";
- MessageSizeType MessageSize, desc="size category of the message";
- Cycles InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
- Cycles ForwardRequestTime, default="0", desc="time the dir forwarded the request";
- Cycles ProbeRequestStartTime, default="0", desc="the time the dir started the probe request";
- bool DemandRequest, default="false", desc="For profiling purposes";
-
- NetDest Sharers, desc="Caches that may have a valid copy of the data";
- bool ForceShared, desc="R-dir knows it is shared, pass on so it sends an S copy, not E";
- bool Private, default="false", desc="Requestor already has private permissions, no need for dir check";
- bool CtoDSinked, default="false", desc="True if the previously sent CtoD must have been sunk";
-
- bool NoAckNeeded, default="false", desc="True if region buffer doesn't need to ack";
- int Acks, default="0", desc="Acks that the dir (mem ctrl) should expect to receive";
- CoherenceRequestType OriginalType, default="CoherenceRequestType_NA", desc="Type of request from core fwded through region buffer";
-
- bool functionalRead(Packet *pkt) {
- // Only VicDirty messages carry the data block
- if (Type == CoherenceRequestType:VicDirty) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // write data to every message that may contain the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-structure(NBProbeRequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- ProbeRequestType Type, desc="probe signal";
- bool ReturnData, desc="Indicates CPU should return data";
- NetDest Destination, desc="Node to whom the data is sent";
- MessageSizeType MessageSize, desc="size category of the message";
- bool DemandRequest, default="false", desc="demand request, requesting 3-hop transfer";
- Addr DemandAddress, desc="Demand block address for a region request";
- MachineID Requestor, desc="Requestor id for 3-hop requests";
- bool NoAckNeeded, default="false", desc="For short-circuiting acks";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Probe requests never carry the data block, so there is
- // nothing to write
- return false;
- }
-
-}
-
-structure(TDProbeRequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- ProbeRequestType Type, desc="TD_PrbNxtState signal";
- bool ReturnData, desc="Indicates CPU should return data";
- bool localCtoD, desc="Indicates CtoD is within the GPU hierarchy (aka TCC subtree)";
- NetDest Destination, desc="Node to whom the data is sent";
- MessageSizeType MessageSize, desc="size category of the message";
- MachineID Sender, desc="Node who sent the data";
- bool currentOwner, default="false", desc="Is the sender the current owner";
- bool DoneAck, default="false", desc="Is this a done ack?";
- bool Dirty, default="false", desc="Was block dirty when evicted";
- bool wasValid, default="false", desc="Was block valid when evicted";
- bool valid, default="false", desc="Is block valid";
- bool validToInvalid, default="false", desc="True if the block went from valid to invalid on eviction";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Probe requests never carry the data block, so there is
- // nothing to write
- return false;
- }
-}
-
- // Response messages are similar enough to be merged into one type
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="NB Sys Resp or CPU Response to Probe";
- MachineID Sender, desc="Node who sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
- // Begin Used Only By CPU Response
- DataBlock DataBlk, desc="data for the cache line";
- bool Hit, desc="probe hit valid line";
- bool Shared, desc="True if S, or if NB Probe ReturnData==1 && O";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- bool Ntsl, desc="indicates probed line will be invalid after probe";
- bool UntransferredOwner, desc="pending confirmation of ownership change";
- // End Used Only By CPU Response
-
- // Begin NB Response Only
- CoherenceState State, default=CoherenceState_NA, desc="What returned data from NB should be in";
- bool CtoD, desc="was the originator a CtoD?";
- // End NB Response Only
-
- bool NbReqShared, desc="modification of Shared field from initial request, e.g. hit by shared probe";
-
- MessageSizeType MessageSize, desc="size category of the message";
- Cycles InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
- Cycles ForwardRequestTime, default="0", desc="time the dir forwarded the request";
- Cycles ProbeRequestStartTime, default="0", desc="the time the dir started the probe request";
- bool DemandRequest, default="false", desc="For profiling purposes";
-
- bool L3Hit, default="false", desc="Did memory or L3 supply the data?";
- MachineID OriginalResponder, desc="Mach which wrote the data to the L3";
-
- bool NotCached, default="false", desc="True when the Region buffer has already evicted the line";
-
- bool NoAckNeeded, default="false", desc="For short-circuiting acks";
- bool isValid, default="false", desc="Is acked block valid";
-
- bool functionalRead(Packet *pkt) {
- // Only data-carrying messages (CPUData, MemData) contain the data block
- if (Type == CoherenceResponseType:CPUData ||
- Type == CoherenceResponseType:MemData) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // write data to every message that may contain the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-structure(UnblockMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- NetDest Destination, desc="Destination (always directory)";
- MessageSizeType MessageSize, desc="size category of the message";
-}
-
-enumeration(TriggerType, desc="Trigger Type") {
- L2_to_L1, desc="L2 to L1 fill";
- AcksComplete, desc="NB received all needed Acks";
-
- // For regions
- InvNext, desc="Invalidate the next block";
- PrivateAck, desc="Loopback ack for machines with no Region Buffer";
- AllOutstanding, desc="All outstanding requests have finished";
- L3Hit, desc="L3 hit in dir";
-
- // For region directory once the directory is blocked
- InvRegion, desc="Invalidate region";
- DowngradeRegion, desc="Downgrade region";
-}
-
-enumeration(CacheId, desc="Which Cache in the Core") {
- L1I, desc="L1 I-cache";
- L1D0, desc="L1 D-cache cluster 0";
- L1D1, desc="L1 D-cache cluster 1";
- NA, desc="Default";
-}
-
-structure(TriggerMsg, desc="...", interface="Message") {
- Addr addr, desc="Address";
- TriggerType Type, desc="Type of trigger";
- CacheId Dest, default="CacheId_NA", desc="Cache to invalidate";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Trigger messages never carry the data block, so there is
- // nothing to write
- return false;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Jason Power
- */
-
-machine(MachineType:RegionBuffer, "Region Buffer for AMD_Base-like protocol")
-: CacheMemory *cacheMemory; // stores only region addresses. Must set block size same as below
- bool isOnCPU;
- int blocksPerRegion := 64; // 64 blocks x 64B = 4KB regions
- Cycles toDirLatency := 5; // Latency to fwd requests to directory
- Cycles toRegionDirLatency := 5; // Latency for requests and acks to directory
- Cycles nextEvictLatency := 1; // latency added between each block while evicting region
- bool noTCCdir := "False";
- int TCC_select_num_bits := 1;
-
- // From the Cores
- MessageBuffer * requestFromCore, network="From", virtual_network="0", vnet_type="request";
- MessageBuffer * responseFromCore, network="From", virtual_network="2", vnet_type="response";
-
- // Requests to the cores or directory
- MessageBuffer * requestToNetwork, network="To", virtual_network="0", vnet_type="request";
-
- // From Region-Dir
- MessageBuffer * notifyFromRegionDir, network="From", virtual_network="7", vnet_type="request";
- MessageBuffer * probeFromRegionDir, network="From", virtual_network="8", vnet_type="request";
-
- // From the directory
- MessageBuffer * unblockFromDir, network="From", virtual_network="4", vnet_type="unblock";
-
- // To the region-Dir
- MessageBuffer * responseToRegDir, network="To", virtual_network="2", vnet_type="response";
-
- MessageBuffer * triggerQueue;
-{
-
- // States
- state_declaration(State, desc="Region states", default="RegionBuffer_State_NP") {
- NP, AccessPermission:Invalid, desc="Not present in region directory";
- P, AccessPermission:Invalid, desc="Region is private to the cache";
- S, AccessPermission:Invalid, desc="Region is possibly shared with others";
-
- NP_PS, AccessPermission:Invalid, desc="Intermediate state waiting for notify from r-dir";
- S_P, AccessPermission:Invalid, desc="Intermediate state while upgrading region";
-
- P_NP, AccessPermission:Invalid, desc="Intermediate state while evicting all lines in region";
- P_S, AccessPermission:Invalid, desc="Intermediate state while downgrading all lines in region";
-
- S_NP_PS, AccessPermission:Invalid, desc="Got an inv in S_P; waiting for all inv acks, then going to NP_PS since the write is already out there";
- P_NP_NP, AccessPermission:Invalid, desc="Evicting region on repl, then got an inv. Need to re-evict";
-
- P_NP_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
- P_S_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
- S_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
- S_NP_PS_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
-
- SS_P, AccessPermission:Invalid, desc="Waiting for CPU write that we know is there";
-
- P_NP_W, AccessPermission:Invalid, desc="Waiting for writeback ack";
-
- NP_W, AccessPermission:Invalid, desc="Got a done ack before request, waiting for that victim";
- }
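-
- // Naming convention (inferred from the descriptions above): compound
- // states read left to right, e.g. P_NP is evicting on its way from P to
- // NP, and a trailing _O means the region first drains all outstanding
- // requests.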
-
- enumeration(Event, desc="Region directory events") {
- CPURead, desc="Access from CPU core";
- CPUWrite, desc="Access from CPU core";
- CPUWriteback, desc="Writeback request from CPU core";
-
- ReplRegion, desc="Start a replace on a region";
-
- PrivateNotify, desc="Update entry to private state";
- SharedNotify, desc="Update entry to shared state";
- WbNotify, desc="Writeback notification received";
- InvRegion, desc="Start invalidating a region";
- DowngradeRegion, desc="Start downgrading a region";
-
- InvAck, desc="Ack from core";
-
- DoneAck, desc="Ack from core that request has finished";
- AllOutstanding, desc="All outstanding requests have now finished";
-
- Evict, desc="Loopback to evict each block";
- LastAck_PrbResp, desc="Done evicting all the blocks, got the last ack from core, now respond to region dir";
- LastAck_CleanWb, desc="Done evicting all the blocks, got the last ack from core, now start clean writeback (note the dir has already been updated)";
-
- StallAccess, desc="Wait for the done ack on the address before proceeding";
- StallDoneAck, desc="Wait for the access on the address before proceeding";
-
- StaleRequest, desc="Got a stale victim from the cache, fwd it without incrementing outstanding";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- TagArrayRead, desc="Read the data array";
- TagArrayWrite, desc="Write the data array";
- }
-
- structure(BoolVec, external="yes") {
- bool at(int);
- void resize(int);
- void clear();
- int size();
- }
-
- structure(Entry, desc="Region entry", interface="AbstractCacheEntry") {
- Addr addr, desc="Base address of this region";
- State RegionState, desc="Region state";
- DataBlock DataBlk, desc="Data for the block (always empty in region buffer)";
- BoolVec ValidBlocks, desc="A vector to keep track of valid blocks";
- int NumValidBlocks, desc="Number of trues in ValidBlocks to avoid iterating";
- BoolVec UsedBlocks, desc="A vector to keep track of blocks ever valid";
- bool dirty, desc="Dirty as best known by the region buffer";
- // This is needed so we don't ack an invalidate until all requests are ordered
- int NumOutstandingReqs, desc="Total outstanding private/shared requests";
- BoolVec OutstandingReqs, desc="Blocks that have outstanding private/shared requests";
- bool MustDowngrade, desc="Set when we got a downgrade before the shd or pvt permissions";
- Cycles ProbeRequestTime, default="Cycles(0)", desc="Time region dir started the probe";
- Cycles InitialRequestTime, default="Cycles(0)", desc="Time message was sent to region dir";
- bool MsgSentToDir, desc="True if the current request required a message to the dir";
- bool clearOnDone, default="false", desc="clear valid bit when request completes";
- Addr clearOnDoneAddr, desc="clear valid bit when request completes";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- //int NumValidBlocks, desc="Number of blocks valid so we don't have to count a BoolVec";
- BoolVec ValidBlocks, desc="A vector to keep track of valid blocks";
- bool AllAcksReceived, desc="Got all necessary acks from dir";
- bool DoneEvicting, desc="Done iterating through blocks checking for valids";
- BoolVec AcksReceived, desc="Received acks for these blocks";
- bool SendAck, desc="If true, send an ack to the r-dir at end of inv";
- ProbeRequestType MsgType, desc="Type of message to send while 'evicting' ";
- int NumOutstandingReqs, desc="Total outstanding private/shared requests";
- BoolVec OutstandingReqs, desc="Blocks that have outstanding private/shared requests";
- MachineID Requestor, desc="Requestor for three hop transactions";
- bool DemandRequest, default="false", desc="Associated with a demand request";
- Addr DemandAddress, desc="Address for the demand request";
- bool DoneAckReceived, default="false", desc="True if the done ack arrived before the message";
- Addr DoneAckAddr, desc="Address of the done ack received early";
- int OutstandingThreshold, desc="Number of outstanding requests to trigger AllOutstanding on";
-
- ProbeRequestType NewMsgType, desc="Type of message to send while 'evicting' ";
- MachineID NewRequestor, desc="Requestor for three hop transactions";
- bool NewDemandRequest, default="false", desc="Associated with a demand request";
- Addr NewDemandAddress, desc="Address for the demand request";
- bool dirty, desc="dirty";
- bool AllOutstandingTriggered, default="false", desc="bit for only one all outstanding";
- int OutstandingAcks, default="0", desc="number of acks to wait for";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- // Stores only region addresses
- TBETable TBEs, template="<RegionBuffer_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- int blockBits, default="RubySystem::getBlockSizeBits()";
- int blockBytes, default="RubySystem::getBlockSizeBytes()";
- int regionBits, default="log2(m_blocksPerRegion)";
-
- // Functions
-
- int getRegionOffset(Addr addr) {
- if (blocksPerRegion > 1) {
- Addr offset := bitSelect(addr, blockBits, regionBits+blockBits-1);
- int ret := addressToInt(offset);
- assert(ret < blocksPerRegion);
- return ret;
- } else {
- return 0;
- }
- }
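-
- // Worked example (a sketch, assuming the default 64B blocks, so blockBits
- // is 6, and blocksPerRegion = 64, so regionBits is 6): for address
- // 0x12345, bitSelect extracts bits [6, 11] = 0xD, i.e. block 13 of the
- // 64 blocks in its region.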
-
- Addr getRegionBase(Addr addr) {
- return maskLowOrderBits(addr, blockBits+regionBits);
- }
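-
- // Under the same assumptions, getRegionBase clears the low blockBits +
- // regionBits = 12 bits, so address 0x12345 maps to region base 0x12000.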
-
- Addr getNextBlock(Addr addr) {
- Addr a := addr;
- return makeNextStrideAddress(a, 1);
- }
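-
- // A stride of 1 advances by one block, e.g. 0x12000 -> 0x12040 assuming
- // 64B blocks.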
-
- MachineID getPeer(MachineID mach, Addr address) {
- if (isOnCPU) {
- return createMachineID(MachineType:CorePair, intToID(0));
- } else if (noTCCdir) {
- return mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- } else {
- return createMachineID(MachineType:TCCdir, intToID(0));
- }
- }
-
- bool isOutstanding(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe) && tbe.OutstandingReqs.size() > 0) {
- DPRINTF(RubySlicc, " outstanding tbe reqs %s %s %d %d\n",
- tbe.OutstandingReqs, addr, getRegionOffset(addr),
- tbe.OutstandingReqs.at(getRegionOffset(addr)));
- return tbe.OutstandingReqs.at(getRegionOffset(addr));
- } else if (is_valid(cache_entry)) {
- DPRINTF(RubySlicc, " outstanding cache reqs %s %s %d %d\n",
- cache_entry.OutstandingReqs, addr, getRegionOffset(addr),
- cache_entry.OutstandingReqs.at(getRegionOffset(addr)));
- return cache_entry.OutstandingReqs.at(getRegionOffset(addr));
- } else {
- return false;
- }
- }
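-
- // Note the precedence: in-flight TBE bookkeeping wins over the cache
- // entry's when both are valid.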
-
- bool isOnGPU() {
- if (isOnCPU) {
- return false;
- }
- return true;
- }
-
- bool isRead(CoherenceRequestType type) {
- return (type == CoherenceRequestType:RdBlk || type == CoherenceRequestType:RdBlkS ||
- type == CoherenceRequestType:VicClean);
- }
-
- bool presentOrAvail(Addr addr) {
- return cacheMemory.isTagPresent(getRegionBase(addr)) || cacheMemory.cacheAvail(getRegionBase(addr));
- }
-
- // Returns a region entry!
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", cacheMemory.lookup(getRegionBase(addr)));
- }
-
- TBE getTBE(Addr addr), return_by_pointer="yes" {
- return TBEs.lookup(getRegionBase(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- return getCacheEntry(getRegionBase(addr)).DataBlk;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.RegionState;
- }
- return State:NP;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- if (is_valid(cache_entry)) {
- cache_entry.RegionState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := getTBE(addr);
- if(is_valid(tbe)) {
- return RegionBuffer_State_to_permission(tbe.TBEState);
- }
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return RegionBuffer_State_to_permission(cache_entry.RegionState);
- }
- return AccessPermission:NotPresent;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- functionalMemoryRead(pkt);
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- if (functionalMemoryWrite(pkt)) {
- return 1;
- } else {
- return 0;
- }
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(RegionBuffer_State_to_permission(state));
- }
- }
-
- void recordRequestType(RequestType stat, Addr addr) {
- if (stat == RequestType:TagArrayRead) {
- cacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (stat == RequestType:TagArrayWrite) {
- cacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:TagArrayRead) {
- return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- // Overloaded outgoing request network for both probes to cores and
- // requests to the directory.
- // Fix Me: These forwarded requests need to be on a separate virtual channel
- // to avoid deadlock!
- out_port(requestNetwork_out, CPURequestMsg, requestToNetwork);
- out_port(probeNetwork_out, NBProbeRequestMsg, requestToNetwork);
-
- out_port(responseNetwork_out, ResponseMsg, responseToRegDir);
-
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=4) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := getTBE(in_msg.addr);
- DPRINTF(RubySlicc, "trigger msg: %s (%s)\n", in_msg, getRegionBase(in_msg.addr));
- assert(is_valid(tbe));
- if (in_msg.Type == TriggerType:AcksComplete) {
- if (tbe.SendAck) {
- trigger(Event:LastAck_PrbResp, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:LastAck_CleanWb, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == TriggerType:AllOutstanding) {
- trigger(Event:AllOutstanding, in_msg.addr, cache_entry, tbe);
- } else {
- assert(in_msg.Type == TriggerType:InvNext);
- trigger(Event:Evict, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- in_port(unblockNetwork_in, UnblockMsg, unblockFromDir, rank=3) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, UnblockMsg) {
- TBE tbe := getTBE(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.DoneAck) {
- if (isOutstanding(tbe, cache_entry, in_msg.addr)) {
- trigger(Event:DoneAck, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:StallDoneAck, in_msg.addr, cache_entry, tbe);
- }
- } else {
- assert(is_valid(tbe));
- trigger(Event:InvAck, in_msg.addr, cache_entry, tbe);
- }
- }
- }
- }
-
- in_port(probeNetwork_in, NBProbeRequestMsg, probeFromRegionDir, rank=2) {
- if (probeNetwork_in.isReady(clockEdge())) {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- TBE tbe := getTBE(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- assert(getRegionBase(in_msg.addr) == in_msg.addr);
- if (in_msg.Type == ProbeRequestType:PrbInv) {
- trigger(Event:InvRegion, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
- trigger(Event:DowngradeRegion, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unknown probe message\n");
- }
- }
- }
- }
-
- in_port(notifyNetwork_in, CPURequestMsg, notifyFromRegionDir, rank=1) {
- if (notifyNetwork_in.isReady(clockEdge())) {
- peek(notifyNetwork_in, CPURequestMsg) {
- TBE tbe := getTBE(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- //Fix Me...add back in: assert(is_valid(cache_entry));
- if (in_msg.Type == CoherenceRequestType:WbNotify) {
- trigger(Event:WbNotify, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:SharedNotify) {
- trigger(Event:SharedNotify, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:PrivateNotify) {
- trigger(Event:PrivateNotify, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unknown notify message\n");
- }
- }
- }
- }
-
- // In from cores
- // NOTE: We get the cache / TBE entry based on the region address,
- // but pass the block address to the actions
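- // For example, a request for 0x12345 is looked up under region base
- // 0x12000 (assuming 4KB regions), but the events still trigger on the
- // block address 0x12345.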
- in_port(requestNetwork_in, CPURequestMsg, requestFromCore, rank=0) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- TBE tbe := getTBE(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (is_valid(tbe) && tbe.DoneAckReceived && tbe.DoneAckAddr == in_msg.addr) {
- DPRINTF(RubySlicc, "Stale/Stall request %s\n", in_msg.Type);
- if (in_msg.Type == CoherenceRequestType:VicDirty || in_msg.Type == CoherenceRequestType:VicClean )
- {
- trigger(Event:StaleRequest, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:StallAccess, in_msg.addr, cache_entry, tbe);
- }
- } else if (isOutstanding(tbe, cache_entry, in_msg.addr)) {
- DPRINTF(RubySlicc, "Stall outstanding request %s\n", in_msg.Type);
- trigger(Event:StallAccess, in_msg.addr, cache_entry, tbe);
- } else {
- if (presentOrAvail(in_msg.addr)) {
- if (in_msg.Type == CoherenceRequestType:RdBlkM ) {
- trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WriteThrough ) {
- trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:Atomic ) {
- trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
- } else {
- if (in_msg.Type == CoherenceRequestType:VicDirty ||
- in_msg.Type == CoherenceRequestType:VicClean) {
- trigger(Event:CPUWriteback, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:CPURead, in_msg.addr, cache_entry, tbe);
- }
- }
- } else {
- Addr victim := cacheMemory.cacheProbe(getRegionBase(in_msg.addr));
- TBE victim_tbe := getTBE(victim);
- Entry victim_entry := getCacheEntry(victim);
- DPRINTF(RubySlicc, "Replacing region %s for %s(%s)\n", victim, in_msg.addr, getRegionBase(in_msg.addr));
- trigger(Event:ReplRegion, victim, victim_entry, victim_tbe);
- }
- }
- }
- }
- }
-
- // Actions
- action(f_fwdReqToDir, "f", desc="Forward CPU request to directory") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := in_msg.Type;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Private := true;
- out_msg.InitialRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := curCycle();
- if (getState(tbe, cache_entry, address) == State:S) {
- out_msg.ForceShared := true;
- }
- DPRINTF(RubySlicc, "Fwd: %s\n", out_msg);
- //assert(getState(tbe, cache_entry, address) == State:P || getState(tbe, cache_entry, address) == State:S);
- if (getState(tbe, cache_entry, address) == State:NP_W) {
- APPEND_TRANSITION_COMMENT(" fwding stale request: ");
- APPEND_TRANSITION_COMMENT(out_msg.Type);
- }
- }
- }
- }
-
- action(u_updateRegionEntry, "u", desc="Update the entry for profiling") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (is_valid(cache_entry)) {
- if (in_msg.CtoDSinked == false) {
- APPEND_TRANSITION_COMMENT(" incr outstanding ");
- cache_entry.NumOutstandingReqs := 1 + cache_entry.NumOutstandingReqs;
- assert(cache_entry.OutstandingReqs.at(getRegionOffset(address)) == false);
- cache_entry.OutstandingReqs.at(getRegionOffset(address)) := true;
- assert(cache_entry.NumOutstandingReqs == countBoolVec(cache_entry.OutstandingReqs));
- } else {
- APPEND_TRANSITION_COMMENT(" NOT incr outstanding ");
- assert(in_msg.Type == CoherenceRequestType:RdBlkM || in_msg.Type == CoherenceRequestType:RdBlkS);
- }
- APPEND_TRANSITION_COMMENT(cache_entry.NumOutstandingReqs);
- if (in_msg.Type == CoherenceRequestType:RdBlkM || in_msg.Type == CoherenceRequestType:Atomic ||
- in_msg.Type == CoherenceRequestType:WriteThrough )
- {
- cache_entry.dirty := true;
- }
- if (in_msg.Type == CoherenceRequestType:VicDirty ||
- in_msg.Type == CoherenceRequestType:VicClean) {
- DPRINTF(RubySlicc, "Got %s for addr %s\n", in_msg.Type, address);
- //assert(cache_entry.ValidBlocks.at(getRegionOffset(address)));
- // can in fact be inv if core got an inv after a vicclean before it got here
- if (cache_entry.ValidBlocks.at(getRegionOffset(address))) {
- cache_entry.clearOnDone := true;
- cache_entry.clearOnDoneAddr := address;
- //cache_entry.ValidBlocks.at(getRegionOffset(address)) := false;
- //cache_entry.NumValidBlocks := cache_entry.NumValidBlocks - 1;
- }
- } else {
- if (cache_entry.ValidBlocks.at(getRegionOffset(address)) == false) {
- cache_entry.NumValidBlocks := cache_entry.NumValidBlocks + 1;
- }
- DPRINTF(RubySlicc, "before valid addr %s bits %s\n",
- in_msg.Type, address, cache_entry.ValidBlocks);
- cache_entry.ValidBlocks.at(getRegionOffset(address)) := true;
- DPRINTF(RubySlicc, "after valid addr %s bits %s\n",
- in_msg.Type, address, cache_entry.ValidBlocks);
- cache_entry.UsedBlocks.at(getRegionOffset(address)) := true;
- }
- assert(cache_entry.NumValidBlocks <= blocksPerRegion);
- assert(cache_entry.NumValidBlocks >= 0);
- APPEND_TRANSITION_COMMENT(" valid blocks ");
- APPEND_TRANSITION_COMMENT(cache_entry.ValidBlocks);
- } else {
- error("This shouldn't happen anymore I think");
- //tbe.ValidBlocks.at(getRegionOffset(address)) := true;
- assert(getState(tbe, cache_entry, address) == State:P_NP);
- }
- }
- }
-
- action(uw_updatePossibleWriteback, "uw", desc="writeback request complete") {
- peek(unblockNetwork_in, UnblockMsg) {
- if (is_valid(cache_entry) && in_msg.validToInvalid &&
- cache_entry.clearOnDone && cache_entry.clearOnDoneAddr == address) {
- DPRINTF(RubySlicc, "I have no idea what is going on here\n");
- cache_entry.ValidBlocks.at(getRegionOffset(address)) := false;
- cache_entry.NumValidBlocks := cache_entry.NumValidBlocks - 1;
- cache_entry.clearOnDone := false;
- }
- }
- }
-
-
- action(rp_requestPrivate, "rp", desc="Send private request r-dir") {
- peek(requestNetwork_in, CPURequestMsg) {
- // No need to send acks on replacements
- assert(is_invalid(tbe));
- enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
- out_msg.addr := address; // use the actual address so the demand request can be fulfilled
- out_msg.DemandAddress := address;
- out_msg.Type := CoherenceRequestType:PrivateRequest;
- out_msg.OriginalType := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.InitialRequestTime := curCycle();
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- DPRINTF(RubySlicc, "Private request %s\n", out_msg);
- }
- cache_entry.ProbeRequestTime := curCycle();
- cache_entry.MsgSentToDir := true;
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- }
- }
-
- action(ru_requestUpgrade, "ru", desc="Send upgrade request r-dir") {
- peek(requestNetwork_in, CPURequestMsg) {
- // No need to send acks on replacements
- assert(is_invalid(tbe));
- enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
- out_msg.addr := address; // use the actual address so the demand request can be fulfilled
- out_msg.Type := CoherenceRequestType:UpgradeRequest;
- out_msg.OriginalType := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.InitialRequestTime := curCycle();
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- cache_entry.ProbeRequestTime := curCycle();
- cache_entry.MsgSentToDir := true;
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- }
- }
-
- action(rw_requestWriteback, "rw", desc="Send writeback request") {
- // No need to send acks on replacements
- enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address); // the writeback covers the whole region, so use the region base address
- out_msg.Type := CoherenceRequestType:CleanWbRequest;
- out_msg.Requestor := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.Dirty := tbe.dirty;
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- }
- }
-
- action(rs_requestShared, "rs", desc="Send shared request r-dir") {
- peek(requestNetwork_in, CPURequestMsg) {
- // No need to send acks on replacements
- assert(is_invalid(tbe));
- enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
- out_msg.addr := address; // use the actual address so the demand request can be fulfilled
- out_msg.Type := CoherenceRequestType:SharedRequest;
- out_msg.OriginalType := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.InitialRequestTime := curCycle();
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- cache_entry.ProbeRequestTime := curCycle();
- cache_entry.MsgSentToDir := true;
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- }
- }
-
- action(ai_ackRegionInv, "ai", desc="Send ack to r-dir on region inv if tbe says so") {
- // No need to send acks on replacements
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ad_ackDirectory, "ad", desc="send probe response to directory") {
- if (noTCCdir && tbe.MsgType == ProbeRequestType:PrbDowngrade && isOnGPU()) { //VIPER TCC doesn't understand PrbShrData
- assert(tbe.DemandRequest); //So, let RegionBuffer take care of sending back ack
- enqueue(responseNetwork_out, ResponseMsg, toDirLatency) {
- out_msg.addr := tbe.DemandAddress;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := getPeer(machineID,address);
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false; // only true when data is sent back
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.NoAckNeeded := true;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
-
- action(aie_ackRegionExclusiveInv, "aie", desc="Send ack to r-dir on region inv if tbe says so") {
- // No need to send acks on replacements
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.NotCached := true;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.Dirty := tbe.dirty;
- }
- }
-
- action(ain_ackRegionInvNow, "ain", desc="Send ack to r-dir on region inv") {
- enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(aine_ackRegionInvExclusiveNow, "aine", desc="Send ack to r-dir on region inv with exclusive permission") {
- enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceResponseType:CPUPrbResp;
- out_msg.Sender := machineID;
- out_msg.NotCached := true;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ap_ackPrivateNotify, "ap", desc="Send ack to r-dir on private notify") {
- enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceResponseType:PrivateAck;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(aw_ackWbNotify, "aw", desc="Send ack to r-dir on writeback notify") {
- peek(notifyNetwork_in, CPURequestMsg) {
- if (in_msg.NoAckNeeded == false) {
- enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceResponseType:RegionWbAck;
- out_msg.Sender := machineID;
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
- }
-
- action(e_evictCurrent, "e", desc="Evict this block in the region") {
- // send force invalidate message to directory to invalidate this block
- // must invalidate all blocks since the region buffer could have privatized it
- if (tbe.ValidBlocks.at(getRegionOffset(address)) &&
- (tbe.DemandRequest == false || tbe.DemandAddress != address)) {
- DPRINTF(RubySlicc, "trying to evict address %s (base: %s, offset: %d)\n", address, getRegionBase(address), getRegionOffset(address));
- DPRINTF(RubySlicc, "tbe valid blocks %s\n", tbe.ValidBlocks);
-
- enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := tbe.MsgType;
- out_msg.ReturnData := true;
- if (address == tbe.DemandAddress) {
- out_msg.DemandRequest := true;
- }
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(getPeer(machineID,address));
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- APPEND_TRANSITION_COMMENT(" current ");
- APPEND_TRANSITION_COMMENT(tbe.ValidBlocks.at(getRegionOffset(address)));
- tbe.AllAcksReceived := false;
- } else {
- DPRINTF(RubySlicc, "Not evicting demand %s\n", address);
- }
- }
-
- action(ed_evictDemand, "ed", desc="Evict the demand request if it's valid") {
- if (noTCCdir && tbe.MsgType == ProbeRequestType:PrbDowngrade && isOnGPU()) {
- tbe.OutstandingAcks := 0;
- tbe.AllAcksReceived := true;
- tbe.DoneEvicting := true;
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:AcksComplete;
- out_msg.addr := getRegionBase(address);
- }
- } else if (tbe.DemandRequest) {
- enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
- out_msg.addr := tbe.DemandAddress;
- out_msg.Type := tbe.MsgType;
- out_msg.ReturnData := true;
- out_msg.DemandRequest := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.add(getPeer(machineID,address));
- DPRINTF(RubySlicc, "%s\n", out_msg);
- tbe.AllAcksReceived := false;
- }
- if (tbe.ValidBlocks.at(getRegionOffset(tbe.DemandAddress)) == false) {
- tbe.OutstandingAcks := tbe.OutstandingAcks + 1;
- }
- APPEND_TRANSITION_COMMENT("Evicting demand ");
- APPEND_TRANSITION_COMMENT(tbe.DemandAddress);
- }
- APPEND_TRANSITION_COMMENT("waiting acks ");
- APPEND_TRANSITION_COMMENT(tbe.OutstandingAcks);
- }
-
- action(adp_AckDemandProbe, "fp", desc="forward demand probe even if we know that the core is invalid") {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- if (in_msg.DemandRequest) {
- enqueue(responseNetwork_out, ResponseMsg, toDirLatency) {
- out_msg.addr := in_msg.DemandAddress;
- out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
- out_msg.Sender := getPeer(machineID,address);
- // will this always be ok? probably not for multisocket
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := false; // only true when data is sent back
- out_msg.Hit := false;
- out_msg.Ntsl := false;
- out_msg.State := CoherenceState:NA;
- out_msg.NoAckNeeded := true;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
- }
- }
-
- action(en_enqueueNextEvict, "en", desc="Queue evict the next block in the region") {
- // increment in_msg.addr by blockSize bytes and enqueue on triggerPort
- // Only enqueue if the next address doesn't overrun the region bound
- if (getRegionBase(getNextBlock(address)) == getRegionBase(address)) {
- enqueue(triggerQueue_out, TriggerMsg, nextEvictLatency) {
- out_msg.Type := TriggerType:InvNext;
- out_msg.addr := getNextBlock(address);
- }
- } else {
- tbe.DoneEvicting := true;
- DPRINTF(RubySlicc, "Done evicing region %s\n", getRegionBase(address));
- DPRINTF(RubySlicc, "Waiting for %s acks\n", tbe.OutstandingAcks);
- if (tbe.AllAcksReceived == true) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:AcksComplete;
- out_msg.addr := getRegionBase(address);
- }
- }
- }
- }
-
- action(ef_enqueueFirstEvict, "ef", desc="Queue the first block in the region to be evicted") {
- if (tbe.DoneEvicting == false) {
- enqueue(triggerQueue_out, TriggerMsg, nextEvictLatency) {
- out_msg.Type := TriggerType:InvNext;
- out_msg.addr := getRegionBase(address);
- }
- }
- }
-
- action(ra_receiveAck, "ra", desc="Mark TBE entry as received this ack") {
- DPRINTF(RubySlicc, "received ack for %s reg: %s vec: %s pos: %d\n",
- address, getRegionBase(address), tbe.ValidBlocks, getRegionOffset(address));
- peek(unblockNetwork_in, UnblockMsg) {
- //
- // Note the tbe ValidBlock vec will be a conservative list of the
- // valid blocks since the cache entry ValidBlock vec is set on the
- // request
- //
- if (in_msg.wasValid) {
- assert(tbe.ValidBlocks.at(getRegionOffset(address)));
- }
- }
- tbe.OutstandingAcks := tbe.OutstandingAcks - 1;
- tbe.AcksReceived.at(getRegionOffset(address)) := true;
- assert(tbe.OutstandingAcks >= 0);
- if (tbe.OutstandingAcks == 0) {
- tbe.AllAcksReceived := true;
- if (tbe.DoneEvicting) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:AcksComplete;
- out_msg.addr := getRegionBase(address);
- }
- }
- }
-
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- APPEND_TRANSITION_COMMENT(" Acks left receive ");
- APPEND_TRANSITION_COMMENT(tbe.OutstandingAcks);
- }
-
- action(do_decrementOutstanding, "do", desc="Decrement outstanding requests") {
- APPEND_TRANSITION_COMMENT(" decr outstanding ");
- if (is_valid(cache_entry)) {
- cache_entry.NumOutstandingReqs := cache_entry.NumOutstandingReqs - 1;
- assert(cache_entry.OutstandingReqs.at(getRegionOffset(address)));
- cache_entry.OutstandingReqs.at(getRegionOffset(address)) := false;
- assert(cache_entry.NumOutstandingReqs >= 0);
- assert(cache_entry.NumOutstandingReqs == countBoolVec(cache_entry.OutstandingReqs));
- APPEND_TRANSITION_COMMENT(cache_entry.NumOutstandingReqs);
- }
- if (is_valid(tbe)) {
- tbe.NumOutstandingReqs := tbe.NumOutstandingReqs - 1;
- assert(tbe.OutstandingReqs.at(getRegionOffset(address)));
- tbe.OutstandingReqs.at(getRegionOffset(address)) := false;
- assert(tbe.NumOutstandingReqs >= 0);
- assert(tbe.NumOutstandingReqs == countBoolVec(tbe.OutstandingReqs));
- APPEND_TRANSITION_COMMENT(tbe.NumOutstandingReqs);
- }
- }
-
- action(co_checkOutstanding, "co", desc="check if there are no more outstanding requests") {
- assert(is_valid(tbe));
- if ((tbe.NumOutstandingReqs <= tbe.OutstandingThreshold) &&
- (tbe.AllOutstandingTriggered == false)) {
- APPEND_TRANSITION_COMMENT(" no more outstanding: ");
- APPEND_TRANSITION_COMMENT(tbe.NumOutstandingReqs);
- APPEND_TRANSITION_COMMENT(tbe.OutstandingThreshold);
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:AllOutstanding;
- if (tbe.DemandRequest) {
- out_msg.addr := tbe.DemandAddress;
- } else {
- out_msg.addr := getRegionBase(address);
- }
- DPRINTF(RubySlicc, "co enqueuing %s\n", out_msg);
- tbe.AllOutstandingTriggered := true;
- }
- } else {
- APPEND_TRANSITION_COMMENT(" still more outstanding ");
- }
- }
-
- action(ro_resetAllOutstanding, "ro", desc="Reset all outstanding") {
- tbe.AllOutstandingTriggered := false;
- }
-
- action(so_setOutstandingCheckOne, "so", desc="Make the outstanding check wait for 1, not 0") {
- // Need this for S_P because one request is outstanding between here and r-dir
- tbe.OutstandingThreshold := 1;
- }
-
- action(a_allocateRegionEntry, "a", desc="Allocate a new entry") {
- set_cache_entry(cacheMemory.allocate(getRegionBase(address), new Entry));
- cache_entry.ValidBlocks.clear();
- cache_entry.ValidBlocks.resize(blocksPerRegion);
- cache_entry.UsedBlocks.clear();
- cache_entry.UsedBlocks.resize(blocksPerRegion);
- cache_entry.dirty := false;
- cache_entry.NumOutstandingReqs := 0;
- cache_entry.OutstandingReqs.clear();
- cache_entry.OutstandingReqs.resize(blocksPerRegion);
- }
-
- action(d_deallocateRegionEntry, "d", desc="Deallocate region entry") {
- cacheMemory.deallocate(getRegionBase(address));
- unset_cache_entry();
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- TBEs.allocate(getRegionBase(address));
- set_tbe(getTBE(address));
- tbe.OutstandingAcks := 0;
- tbe.AllAcksReceived := true; // starts true since the region could be empty
- tbe.DoneEvicting := false;
- tbe.AcksReceived.clear();
- tbe.AcksReceived.resize(blocksPerRegion);
- tbe.SendAck := false;
- tbe.OutstandingThreshold := 0;
- if (is_valid(cache_entry)) {
- tbe.NumOutstandingReqs := cache_entry.NumOutstandingReqs;
- tbe.OutstandingReqs := cache_entry.OutstandingReqs;
- assert(tbe.NumOutstandingReqs == countBoolVec(tbe.OutstandingReqs));
- tbe.dirty := cache_entry.dirty;
- tbe.ValidBlocks := cache_entry.ValidBlocks;
- tbe.OutstandingAcks := countBoolVec(tbe.ValidBlocks);
- APPEND_TRANSITION_COMMENT(" tbe valid blocks ");
- APPEND_TRANSITION_COMMENT(tbe.ValidBlocks);
- APPEND_TRANSITION_COMMENT(" cache valid blocks ");
- APPEND_TRANSITION_COMMENT(cache_entry.ValidBlocks);
- } else {
- tbe.dirty := false;
- }
- }
-
- action(m_markSendAck, "m", desc="Mark TBE that we need to ack at end") {
- assert(is_valid(tbe));
- tbe.SendAck := true;
- }
-
- action(db_markDirtyBit, "db", desc="Mark TBE dirty bit") {
- peek(unblockNetwork_in, UnblockMsg) {
- if (is_valid(tbe)) {
- tbe.dirty := tbe.dirty || in_msg.Dirty;
- }
- }
- }
-
- action(dr_markDoneAckReceived, "dr", desc="Mark TBE that a done ack has been received") {
- assert(is_valid(tbe));
- tbe.DoneAckReceived := true;
- tbe.DoneAckAddr := address;
- APPEND_TRANSITION_COMMENT(" marking done ack on TBE ");
- }
-
- action(se_setTBE, "se", desc="Set msg type to evict") {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- tbe.MsgType := in_msg.Type;
- tbe.Requestor := in_msg.Requestor;
- tbe.DemandAddress := in_msg.DemandAddress;
- tbe.DemandRequest := in_msg.DemandRequest;
- }
- }
-
- action(sne_setNewTBE, "sne", desc="Stash a second probe in the new-TBE fields") {
- peek(probeNetwork_in, NBProbeRequestMsg) {
- tbe.NewMsgType := in_msg.Type;
- tbe.NewRequestor := in_msg.Requestor;
- tbe.NewDemandAddress := in_msg.DemandAddress;
- tbe.NewDemandRequest := in_msg.DemandRequest;
- }
- }
-
- action(soe_setOldTBE, "soe", desc="Promote the stashed probe into the active TBE fields") {
- tbe.MsgType := tbe.NewMsgType;
- tbe.Requestor := tbe.NewRequestor;
- tbe.DemandAddress := tbe.NewDemandAddress;
- tbe.DemandRequest := tbe.NewDemandRequest;
- tbe.OutstandingAcks := countBoolVec(tbe.ValidBlocks);
- tbe.AllAcksReceived := true; // starts true since the region could be empty
- tbe.DoneEvicting := false;
- tbe.AcksReceived.clear();
- tbe.AcksReceived.resize(blocksPerRegion);
- tbe.SendAck := false;
- }
-
- action(ser_setTBE, "ser", desc="Set msg type to PrbInv for a replacement") {
- tbe.MsgType := ProbeRequestType:PrbInv;
- }
-
- action(md_setMustDowngrade, "md", desc="When permissions finally get here, must be shared") {
- assert(is_valid(cache_entry));
- cache_entry.MustDowngrade := true;
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- TBEs.deallocate(getRegionBase(address));
- unset_tbe();
- }
-
- action(p_popRequestQueue, "p", desc="Pop the request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(pl_popUnblockQueue, "pl", desc="Pop the unblock queue") {
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(pn_popNotifyQueue, "pn", desc="Pop the notify queue") {
- notifyNetwork_in.dequeue(clockEdge());
- }
-
- action(pp_popProbeQueue, "pp", desc="Pop the probe queue") {
- probeNetwork_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="Pop the trigger queue") {
- DPRINTF(RubySlicc, "Trigger Before Contents: %s\n", triggerQueue_in);
- triggerQueue_in.dequeue(clockEdge());
- DPRINTF(RubySlicc, "Trigger After Contents: %s\n", triggerQueue_in);
- }
-
- // Must always use wake-all, since non-region addresses wait on region addresses
- action(wa_wakeUpAllDependents, "wa", desc="Wake up any requests waiting for this region") {
- wakeUpAllBuffers();
- }
-
- action(zz_stallAndWaitRequestQueue, "\z", desc="recycle request queue") {
- Addr regAddr := getRegionBase(address);
- DPRINTF(RubySlicc, "Stalling address %s\n", regAddr);
- stall_and_wait(requestNetwork_in, regAddr);
- }
-
- action(yy_stallAndWaitProbeQueue, "\y", desc="stall probe queue") {
- Addr regAddr := getRegionBase(address);
- stall_and_wait(probeNetwork_in, regAddr);
- }
-
- action(yyy_recycleProbeQueue, "\yy", desc="recycle probe queue") {
- probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(zzz_recycleRequestQueue, "\zz", desc="recycle request queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(www_recycleUnblockNetwork, "\ww", desc="recycle unblock queue") {
- unblockNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(z_stall, "z", desc="stall request queue") {
- // fake state
- }
-
- action(mru_setMRU, "mru", desc="set MRU") {
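- // Assumption: the second argument is an occupancy weight (the valid-block
- // count), letting replacement favor evicting sparsely used regions.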
- cacheMemory.setMRU(address, cache_entry.NumValidBlocks);
- }
-
- // Transitions
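- //
- // Transient state names read <current>_<target> (e.g. P_S: a private
- // region becoming shared). The _O suffix appears to mark states draining
- // outstanding block requests first, and _W states waiting on a writeback
- // handshake. The brace pair after each transition lists the RequestTypes
- // charged for stats (see recordRequestType) and may be empty.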
-
- transition({NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, P_NP_W, P_NP_NP, NP_W}, {CPURead, CPUWriteback, CPUWrite}) {} {
- zz_stallAndWaitRequestQueue;
- }
-
- transition(SS_P, {CPURead, CPUWriteback}) {
- zz_stallAndWaitRequestQueue;
- }
-
- transition({NP, S, P, NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P, NP_W, P_NP_NP}, StallAccess) {} {
- zz_stallAndWaitRequestQueue;
- }
-
- transition({S, P, NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P, P_NP_W, P_NP_NP, NP_W}, StallDoneAck) {
- www_recycleUnblockNetwork;
- }
-
- transition(NP, StallDoneAck, NP_W) {
- t_allocateTBE;
- db_markDirtyBit;
- dr_markDoneAckReceived;
- pl_popUnblockQueue;
- }
-
- transition(NP_W, StaleRequest, NP) {
- f_fwdReqToDir;
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- p_popRequestQueue;
- }
-
- transition(P_NP_O, DowngradeRegion) {} {
- z_stall; // should stall and wait
- }
-
- transition({NP_PS, S_NP_PS, S_P, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P}, ReplRegion) {} {
- zz_stallAndWaitRequestQueue; // can't let things get out of order!
- }
-
- transition({P_NP_O, S_O, SS_P}, InvRegion) {} {
- yyy_recycleProbeQueue; // can't be z_stall: a RdBlkM with the sinked flag may sit in the requestQueue and block the inv
- }
-
- transition(P_NP, {InvRegion, DowngradeRegion}, P_NP_NP) {} {
- sne_setNewTBE;
- pp_popProbeQueue;
- }
-
- transition(S_P, DowngradeRegion) {} {
- adp_AckDemandProbe;
- ain_ackRegionInvNow;
- pp_popProbeQueue;
- }
-
- transition(P_NP_W, InvRegion) {
- adp_AckDemandProbe;
- ain_ackRegionInvNow;
- pp_popProbeQueue;
- }
-
- transition(P_NP_W, DowngradeRegion) {
- adp_AckDemandProbe;
- aine_ackRegionInvExclusiveNow;
- pp_popProbeQueue;
- }
-
- transition({P, S}, {CPURead, CPUWriteback}) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- f_fwdReqToDir;
- u_updateRegionEntry;
- p_popRequestQueue;
- }
-
- transition(P, CPUWrite) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- f_fwdReqToDir;
- u_updateRegionEntry;
- p_popRequestQueue;
- }
-
- transition(S, CPUWrite, S_O) {TagArrayRead} {
- mru_setMRU;
- t_allocateTBE;
- co_checkOutstanding;
- zz_stallAndWaitRequestQueue;
- }
-
- transition(S_O, AllOutstanding, SS_P) {
- wa_wakeUpAllDependents;
- ro_resetAllOutstanding;
- pt_popTriggerQueue;
- }
-
- transition(SS_P, CPUWrite, S_P) {
- mru_setMRU;
- dt_deallocateTBE;
- ru_requestUpgrade;
- u_updateRegionEntry;
- p_popRequestQueue;
- }
-
- transition(NP, {CPURead, CPUWriteback}, NP_PS) {TagArrayRead, TagArrayWrite} {
- a_allocateRegionEntry;
- rs_requestShared;
- u_updateRegionEntry;
- p_popRequestQueue;//zz_stallAndWaitRequestQueue;
- }
-
- transition(NP, CPUWrite, NP_PS) {TagArrayRead, TagArrayWrite} {
- a_allocateRegionEntry;
- rp_requestPrivate;
- u_updateRegionEntry;
- p_popRequestQueue;//zz_stallAndWaitRequestQueue;
- }
-
- transition(NP_PS, PrivateNotify, P) {} {
- ap_ackPrivateNotify;
- wa_wakeUpAllDependents;
- pn_popNotifyQueue;
- }
-
- transition(S_P, PrivateNotify, P) {} {
- ap_ackPrivateNotify;
- wa_wakeUpAllDependents;
- pn_popNotifyQueue;
- }
-
- transition(NP_PS, SharedNotify, S) {} {
- ap_ackPrivateNotify;
- wa_wakeUpAllDependents;
- pn_popNotifyQueue;
- }
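-
- // Note: a SharedNotify is acked with the same ap_ackPrivateNotify action;
- // the region directory accepts one PrivateAck response type as the
- // ordering point for both private and shared notifications.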
-
- transition(P_NP_W, WbNotify, NP) {} {
- aw_ackWbNotify;
- wa_wakeUpAllDependents;
- dt_deallocateTBE;
- pn_popNotifyQueue;
- }
-
- transition({P, S}, ReplRegion, P_NP_O) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- ser_setTBE;
- d_deallocateRegionEntry;
- co_checkOutstanding;
- }
-
- transition({P, S}, InvRegion, P_NP_O) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- se_setTBE;
- m_markSendAck;
- d_deallocateRegionEntry;
- co_checkOutstanding;
- pp_popProbeQueue;
- }
-
- transition(P_NP_O, AllOutstanding, P_NP) {} {
- ed_evictDemand;
- ef_enqueueFirstEvict;
- ro_resetAllOutstanding;
- pt_popTriggerQueue;
- }
-
- transition(S_P, InvRegion, S_NP_PS_O) {TagArrayRead} {
- t_allocateTBE;
- se_setTBE;
- m_markSendAck;
- so_setOutstandingCheckOne;
- co_checkOutstanding;
- pp_popProbeQueue;
- }
-
- transition(S_NP_PS_O, AllOutstanding, S_NP_PS) {
- ed_evictDemand;
- ef_enqueueFirstEvict;
- ro_resetAllOutstanding;
- pt_popTriggerQueue;
- }
-
- transition(P, DowngradeRegion, P_S_O) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- se_setTBE;
- m_markSendAck;
- co_checkOutstanding;
- pp_popProbeQueue;
- }
-
- transition(P_S_O, AllOutstanding, P_S) {} {
- ed_evictDemand;
- ef_enqueueFirstEvict;
- ro_resetAllOutstanding;
- pt_popTriggerQueue;
- }
-
- transition({P, S}, DoneAck) {TagArrayWrite} {
- do_decrementOutstanding;
- wa_wakeUpAllDependents;
- db_markDirtyBit;
- uw_updatePossibleWriteback;
- pl_popUnblockQueue;
- }
-
- transition({S_P, NP_PS, S_NP_PS}, DoneAck) {TagArrayWrite} {
- www_recycleUnblockNetwork;
- }
-
- transition({P_NP_O, S_NP_PS_O, P_S_O, S_O}, DoneAck) {} {
- do_decrementOutstanding;
- co_checkOutstanding;
- db_markDirtyBit;
- uw_updatePossibleWriteback;
- pl_popUnblockQueue;
- }
-
- transition({P_NP, P_S, S_NP_PS, P_NP_NP}, Evict) {} {
- e_evictCurrent;
- en_enqueueNextEvict;
- pt_popTriggerQueue;
- }
-
- transition({P_NP, P_S, S_NP_PS, P_NP_NP}, InvAck) {} {
- ra_receiveAck;
- db_markDirtyBit;
- pl_popUnblockQueue;
- }
-
- transition(P_NP, LastAck_CleanWb, P_NP_W) {} {
- rw_requestWriteback;
- pt_popTriggerQueue;
- }
-
- transition(P_NP_NP, LastAck_CleanWb, P_NP) {} {
- soe_setOldTBE;
- m_markSendAck;
- ed_evictDemand;
- ef_enqueueFirstEvict;
- pt_popTriggerQueue;
- }
-
- transition(P_NP, LastAck_PrbResp, NP) {} {
- aie_ackRegionExclusiveInv;
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pt_popTriggerQueue;
- }
-
- transition(S_NP_PS, LastAck_PrbResp, NP_PS) {} {
- aie_ackRegionExclusiveInv;
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pt_popTriggerQueue;
- }
-
- transition(P_S, LastAck_PrbResp, S) {} {
- ai_ackRegionInv;
- ad_ackDirectory;
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pt_popTriggerQueue;
- }
-
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Jason Power
- */
-
-machine(MachineType:RegionDir, "Region Directory for AMD_Base-like protocol")
-: CacheMemory *cacheMemory; // stores only region addresses; its block size must be set to the region size
- NodeID cpuRegionBufferNum;
- NodeID gpuRegionBufferNum;
- int blocksPerRegion := 64; // 4kB regions with 64B blocks
- Cycles toDirLatency := 10; // Latency to fwd requests and send invs to directory
- bool always_migrate := "False";
- bool sym_migrate := "False";
- bool asym_migrate := "False";
- bool noTCCdir := "False";
- int TCC_select_num_bits := 1;
-
- // To the directory
- MessageBuffer * requestToDir, network="To", virtual_network="5", vnet_type="request";
-
- // To the region buffers
- MessageBuffer * notifyToRBuffer, network="To", virtual_network="7", vnet_type="request";
- MessageBuffer * probeToRBuffer, network="To", virtual_network="8", vnet_type="request";
-
- // From the region buffers
- MessageBuffer * responseFromRBuffer, network="From", virtual_network="2", vnet_type="response";
- MessageBuffer * requestFromRegBuf, network="From", virtual_network="0", vnet_type="request";
-
- MessageBuffer * triggerQueue;
-{
-
- // States
- state_declaration(State, desc="Region states", default="RegionDir_State_NP") {
- NP, AccessPermission:Invalid, desc="Not present in region directory";
- P, AccessPermission:Invalid, desc="Region is private to owner";
- S, AccessPermission:Invalid, desc="Region is shared between CPU and GPU";
-
- P_NP, AccessPermission:Invalid, desc="Evicting the region";
- NP_P, AccessPermission:Invalid, desc="Must wait for ack from R-buf";
- NP_S, AccessPermission:Invalid, desc="Must wait for ack from R-buf";
- P_P, AccessPermission:Invalid, desc="Waiting for ack from R-buf";
- S_S, AccessPermission:Invalid, desc="Waiting for ack from R-buf";
- P_S, AccessPermission:Invalid, desc="Downgrading the region";
- S_P, AccessPermission:Invalid, desc="Upgrading the region";
- P_AS, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
- S_AP, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
- P_AP, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
-
- SP_NP_W, AccessPermission:Invalid, desc="Last sharer writing back, waiting for ack";
- S_W, AccessPermission:Invalid, desc="Sharer writing back, waiting for ack";
-
- P_AP_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
- P_AS_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
- S_AP_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
- }
-
- enumeration(Event, desc="Region directory events") {
- SendInv, desc="Send inv message to any machine that has a region buffer";
- SendUpgrade, desc="Send upgrade message to any machine that has a region buffer";
- SendDowngrade, desc="Send downgrade message to any machine that has a region buffer";
-
- Evict, desc="Evict this region";
-
- UpgradeRequest, desc="Request from r-buf for an upgrade";
- SharedRequest, desc="Request from r-buf for read";
- PrivateRequest, desc="Request from r-buf for write";
-
- InvAckCore, desc="Ack from region buffer to order the invalidate";
- InvAckCoreNoShare, desc="Ack from region buffer to order the invalidate, and it does not have the region";
- CPUPrivateAck, desc="Ack from region buffer to order private notification";
-
- LastAck, desc="Done eviciting all the blocks";
-
- StaleCleanWbRequest, desc="stale clean writeback request";
- StaleCleanWbRequestNoShare, desc="stale clean wb req from a cache which should be removed from sharers";
- CleanWbRequest, desc="clean writeback request, multiple sharers";
- CleanWbRequest_LastSharer, desc="clean writeback request, last sharer";
- WritebackAck, desc="Writeback Ack from region buffer";
- DirReadyAck, desc="Directory is ready; ack from region buffer";
-
- TriggerInv, desc="trigger invalidate message";
- TriggerDowngrade, desc="trigger downgrade message";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- DataArrayRead, desc="Read the data array";
- DataArrayWrite, desc="Write the data array";
- TagArrayRead, desc="Read the data array";
- TagArrayWrite, desc="Write the data array";
- }
-
- structure(BoolVec, external="yes") {
- bool at(int);
- void resize(int);
- void clear();
- }
-
- structure(Entry, desc="Region entry", interface="AbstractCacheEntry") {
- Addr addr, desc="Base address of this region";
- NetDest Sharers, desc="Set of machines that are sharing, but not owners";
- State RegionState, desc="Region state";
- DataBlock DataBlk, desc="Data for the block (always empty in region dir)";
- MachineID Owner, desc="Machine which owns all blocks in this region";
- Cycles ProbeStart, desc="Time when the first probe request was issued";
- bool LastWritten, default="false", desc="The last access to this region was a write";
- bool LastWrittenByCpu, default="false", desc="The last CPU access to this region was a write";
- bool LastWrittenByGpu, default="false", desc="The last GPU access to this region was a write";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- MachineID Owner, desc="Machine which owns all blocks in this region";
- NetDest Sharers, desc="Set of machines to send evicts";
- int NumValidBlocks, desc="Number of blocks valid so we don't have to count a BoolVec";
- bool AllAcksReceived, desc="Got all necessary acks from dir";
- CoherenceRequestType MsgType, desc="Msg type for the evicts; can be inv or downgrade";
- Cycles ProbeRequestTime, default="Cycles(0)", desc="Start of probe request";
- Cycles InitialRequestTime, default="Cycles(0)", desc="To forward back on out msg";
- Addr DemandAddress, desc="Demand address from original request";
- uint64_t probe_id, desc="probe id for lifetime profiling";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- // Stores only region addresses
- TBETable TBEs, template="<RegionDir_TBE>", constructor="m_number_of_TBEs";
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- int blockBits, default="RubySystem::getBlockSizeBits()";
- int blockBytes, default="RubySystem::getBlockSizeBytes()";
- int regionBits, default="log2(m_blocksPerRegion)";
-
- // Functions
-
- MachineID getCoreMachine(MachineID rBuf, Addr address) {
- if (machineIDToNodeID(rBuf) == cpuRegionBufferNum) {
- return createMachineID(MachineType:CorePair, intToID(0));
- } else if (machineIDToNodeID(rBuf) == gpuRegionBufferNum) {
- if (noTCCdir) {
- return mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits);
- } else {
- return createMachineID(MachineType:TCCdir, intToID(0));
- }
- } else {
- error("Unexpected region buffer number");
- }
- }
-
- bool isCpuMachine(MachineID rBuf) {
- if (machineIDToNodeID(rBuf) == cpuRegionBufferNum) {
- return true;
- } else if (machineIDToNodeID(rBuf) == gpuRegionBufferNum) {
- return false;
- } else {
- error("Unexpected region buffer number");
- }
- }
-
- bool symMigrate(Entry cache_entry) {
- return cache_entry.LastWritten;
- }
-
- bool asymMigrate(Entry cache_entry, MachineID requestor) {
- if (isCpuMachine(requestor)) {
- return cache_entry.LastWrittenByCpu;
- } else {
- return cache_entry.LastWrittenByGpu;
- }
- }
-
- int getRegionOffset(Addr addr) {
- if (blocksPerRegion > 1) {
- Addr offset := bitSelect(addr, blockBits, regionBits+blockBits-1);
- int ret := addressToInt(offset);
- assert(ret < blocksPerRegion);
- return ret;
- } else {
- return 0;
- }
- }
-
- Addr getRegionBase(Addr addr) {
- return maskLowOrderBits(addr, blockBits+regionBits);
- }
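-
- // Worked example (assuming the usual 64B blocks, blockBits = 6, and the
- // default blocksPerRegion = 64, so regionBits = 6): for addr 0x12345,
- // getRegionBase masks the low 12 bits giving 0x12000, and getRegionOffset
- // takes bits [6,11]: (0x12345 >> 6) & 0x3F = 13, i.e. block 13 of 64.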
-
- Addr getNextBlock(Addr addr) {
- Addr a := addr;
- makeNextStrideAddress(a, 1);
- return a;
- }
-
- bool presentOrAvail(Addr addr) {
- DPRINTF(RubySlicc, "Present? %s, avail? %s\n", cacheMemory.isTagPresent(getRegionBase(addr)), cacheMemory.cacheAvail(getRegionBase(addr)));
- return cacheMemory.isTagPresent(getRegionBase(addr)) || cacheMemory.cacheAvail(getRegionBase(addr));
- }
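-
- // Region-granularity allocation: a request proceeds only if its region
- // tag is present or a way is free; otherwise the victim region is evicted
- // first (see the Evict trigger in the request port below).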
-
- // Returns a region entry!
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", cacheMemory.lookup(getRegionBase(addr)));
- }
-
- TBE getTBE(Addr addr), return_by_pointer="yes" {
- return TBEs.lookup(getRegionBase(addr));
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- return getCacheEntry(getRegionBase(addr)).DataBlk;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.RegionState;
- }
- return State:NP;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- if (is_valid(cache_entry)) {
- cache_entry.RegionState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := getTBE(addr);
- if(is_valid(tbe)) {
- return RegionDir_State_to_permission(tbe.TBEState);
- }
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return RegionDir_State_to_permission(cache_entry.RegionState);
- }
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(RegionDir_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- functionalMemoryRead(pkt);
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- if (functionalMemoryWrite(pkt)) {
- return 1;
- } else {
- return 0;
- }
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- cacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- cacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- cacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- cacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:DataArrayRead) {
- return cacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:DataArrayWrite) {
- return cacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:TagArrayRead) {
- return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:TagArrayWrite) {
- return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- out_port(requestNetwork_out, CPURequestMsg, requestToDir);
- out_port(notifyNetwork_out, CPURequestMsg, notifyToRBuffer);
- out_port(probeNetwork_out, NBProbeRequestMsg, probeToRBuffer);
-
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=2) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- assert(in_msg.addr == getRegionBase(in_msg.addr));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := getTBE(in_msg.addr);
- DPRINTF(RubySlicc, "trigger msg: %s (%s)\n", in_msg, getRegionBase(in_msg.addr));
- if (in_msg.Type == TriggerType:AcksComplete) {
- assert(is_valid(tbe));
- trigger(Event:LastAck, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == TriggerType:InvRegion) {
- assert(is_valid(tbe));
- trigger(Event:TriggerInv, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == TriggerType:DowngradeRegion) {
- assert(is_valid(tbe));
- trigger(Event:TriggerDowngrade, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unknown trigger message");
- }
- }
- }
- }
-
- in_port(responseNetwork_in, ResponseMsg, responseFromRBuffer, rank=1) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- TBE tbe := getTBE(in_msg.addr);
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
- assert(in_msg.addr == getRegionBase(in_msg.addr));
- assert(is_valid(tbe));
- if (in_msg.NotCached) {
- trigger(Event:InvAckCoreNoShare, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:InvAckCore, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceResponseType:PrivateAck) {
- assert(in_msg.addr == getRegionBase(in_msg.addr));
- assert(is_valid(cache_entry));
- //Fix Me...add back in: assert(cache_entry.Sharers.isElement(in_msg.Sender));
- trigger(Event:CPUPrivateAck, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:RegionWbAck) {
- //Fix Me...add back in: assert(cache_entry.Sharers.isElement(in_msg.Sender) == false);
- assert(in_msg.addr == getRegionBase(in_msg.addr));
- trigger(Event:WritebackAck, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DirReadyAck) {
- assert(is_valid(tbe));
- trigger(Event:DirReadyAck, getRegionBase(in_msg.addr), cache_entry, tbe);
- } else {
- error("Invalid response type");
- }
- }
- }
- }
-
- // In from cores
- // NOTE: We get the cache / TBE entry based on the region address,
- // but pass the block address to the actions
- in_port(requestNetwork_in, CPURequestMsg, requestFromRegBuf, rank=0) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- //assert(in_msg.addr == getRegionBase(in_msg.addr));
- Addr address := getRegionBase(in_msg.addr);
- DPRINTF(RubySlicc, "Got %s, base %s\n", in_msg.addr, address);
- if (presentOrAvail(address)) {
- TBE tbe := getTBE(address);
- Entry cache_entry := getCacheEntry(address);
- if (in_msg.Type == CoherenceRequestType:PrivateRequest) {
- if (is_valid(cache_entry) && (cache_entry.Owner != in_msg.Requestor ||
- getState(tbe, cache_entry, address) == State:S)) {
- trigger(Event:SendInv, address, cache_entry, tbe);
- } else {
- trigger(Event:PrivateRequest, address, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:SharedRequest) {
- if (is_invalid(cache_entry)) {
- // If no one has ever requested this region give private permissions
- trigger(Event:PrivateRequest, address, cache_entry, tbe);
- } else {
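- // Migration choice (cf. symMigrate/asymMigrate above): always_migrate
- // moves ownership on any shared request; sym_migrate only if the last
- // access to the region was a write; asym_migrate only if the requesting
- // side (CPU or GPU) last wrote it.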
- if (always_migrate ||
- (sym_migrate && symMigrate(cache_entry)) ||
- (asym_migrate && asymMigrate(cache_entry, in_msg.Requestor))) {
- if (cache_entry.Sharers.count() == 1 &&
- cache_entry.Sharers.isElement(in_msg.Requestor)) {
- trigger(Event:UpgradeRequest, address, cache_entry, tbe);
- } else {
- trigger(Event:SendInv, address, cache_entry, tbe);
- }
- } else { // don't migrate
- if(cache_entry.Sharers.isElement(in_msg.Requestor) ||
- getState(tbe, cache_entry, address) == State:S) {
- trigger(Event:SharedRequest, address, cache_entry, tbe);
- } else {
- trigger(Event:SendDowngrade, address, cache_entry, tbe);
- }
- }
- }
- } else if (in_msg.Type == CoherenceRequestType:UpgradeRequest) {
- if (is_invalid(cache_entry)) {
- trigger(Event:PrivateRequest, address, cache_entry, tbe);
- } else if (cache_entry.Sharers.count() == 1 && cache_entry.Sharers.isElement(in_msg.Requestor)) {
- trigger(Event:UpgradeRequest, address, cache_entry, tbe);
- } else {
- trigger(Event:SendUpgrade, address, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:CleanWbRequest) {
- if (is_invalid(cache_entry) || cache_entry.Sharers.isElement(in_msg.Requestor) == false) {
- trigger(Event:StaleCleanWbRequest, address, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "wb address %s(%s) owner %s sharers %s requestor %s %d %d\n", in_msg.addr, getRegionBase(in_msg.addr), cache_entry.Owner, cache_entry.Sharers, in_msg.Requestor, cache_entry.Sharers.isElement(in_msg.Requestor), cache_entry.Sharers.count());
- if (cache_entry.Sharers.isElement(in_msg.Requestor) && cache_entry.Sharers.count() == 1) {
- DPRINTF(RubySlicc, "last wb\n");
- trigger(Event:CleanWbRequest_LastSharer, address, cache_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "clean wb\n");
- trigger(Event:CleanWbRequest, address, cache_entry, tbe);
- }
- }
- } else {
- error("unknown region dir request type");
- }
- } else {
- Addr victim := cacheMemory.cacheProbe(getRegionBase(in_msg.addr));
- TBE victim_tbe := getTBE(victim);
- Entry victim_entry := getCacheEntry(victim);
- DPRINTF(RubySlicc, "Evicting address %s for new region at address %s(%s)\n", victim, in_msg.addr, getRegionBase(in_msg.addr));
- assert(is_valid(victim_entry));
- trigger(Event:Evict, victim, victim_entry, victim_tbe);
- }
- }
- }
- }
-
- // Actions
-
- action(f_fwdReqToDir, "f", desc="Forward CPU request to directory") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
- out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
- out_msg.Type := in_msg.OriginalType;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Private := in_msg.Private;
- out_msg.NoAckNeeded := true;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ProbeRequestStartTime := curCycle();
- out_msg.DemandRequest := true;
- if (is_valid(cache_entry) && getState(tbe, cache_entry, address) != State:S) {
- out_msg.Acks := cache_entry.Sharers.count();
- } else {
- out_msg.Acks := 0;
- }
- }
- }
- }
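-
- // The Acks count primes the directory with the number of probe responses
- // to expect; the *WithAck variants below additionally exclude the
- // requestor, since no ack is needed from it.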
-
- action(f_fwdReqToDirShared, "fs", desc="Forward CPU request to directory (shared)") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
- out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
- out_msg.Type := in_msg.OriginalType;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Private := in_msg.Private;
- out_msg.NoAckNeeded := true;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ProbeRequestStartTime := curCycle();
- out_msg.DemandRequest := true;
- out_msg.ForceShared := true;
- if (is_valid(cache_entry) && getState(tbe, cache_entry, address) != State:S) {
- out_msg.Acks := cache_entry.Sharers.count();
- } else {
- out_msg.Acks := 0;
- }
- }
- }
- }
-
- action(f_fwdReqToDirWithAck, "fa", desc="Forward CPU request to directory with ack request") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
- out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
- out_msg.Type := in_msg.OriginalType;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Private := in_msg.Private;
- out_msg.NoAckNeeded := false;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ProbeRequestStartTime := curCycle();
- out_msg.DemandRequest := true;
- if (is_valid(cache_entry)) {
- out_msg.Acks := cache_entry.Sharers.count();
- // Don't need an ack from the requestor!
- if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- } else {
- out_msg.Acks := 0;
- }
- }
- }
- }
-
- action(f_fwdReqToDirWithAckShared, "fas", desc="Forward CPU request to directory with ack request") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
- out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
- out_msg.Type := in_msg.OriginalType;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
- out_msg.Shared := in_msg.Shared;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Private := in_msg.Private;
- out_msg.NoAckNeeded := false;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ProbeRequestStartTime := curCycle();
- out_msg.DemandRequest := true;
- out_msg.ForceShared := true;
- if (is_valid(cache_entry)) {
- out_msg.Acks := cache_entry.Sharers.count();
- // Don't need an ack from the requestor!
- if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- } else {
- out_msg.Acks := 0;
- }
- }
- }
- }
-
- action(a_allocateRegionEntry, "a", desc="Allocate a new entry") {
- set_cache_entry(cacheMemory.allocate(getRegionBase(address), new Entry));
- peek(requestNetwork_in, CPURequestMsg) {
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- }
-
- action(d_deallocateRegionEntry, "d", desc="Deallocate region entry") {
- cacheMemory.deallocate(getRegionBase(address));
- unset_cache_entry();
- }
-
- action(ra_receiveAck, "ra", desc="Mark TBE entry as received this ack") {
- //assert(tbe.ValidBlocks.at(getRegionOffset(address)));
- DPRINTF(RubySlicc, "received ack for %s reg: %s\n", address, getRegionBase(address));
- tbe.NumValidBlocks := tbe.NumValidBlocks - 1;
- assert(tbe.NumValidBlocks >= 0);
- if (tbe.NumValidBlocks == 0) {
- tbe.AllAcksReceived := true;
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:AcksComplete;
- out_msg.addr := address;
- }
- }
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- APPEND_TRANSITION_COMMENT(" Acks left receive ");
- APPEND_TRANSITION_COMMENT(tbe.NumValidBlocks);
- }
-
- action(ca_checkAcks, "ca", desc="Check to see if we need more acks") {
- if (tbe.NumValidBlocks == 0) {
- tbe.AllAcksReceived := true;
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:AcksComplete;
- out_msg.addr := address;
- }
- }
- }
-
- action(ti_triggerInv, "ti", desc="Trigger a region invalidate") {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:InvRegion;
- out_msg.addr := address;
- }
- }
-
- action(td_triggerDowngrade, "td", desc="Trigger a region downgrade") {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.Type := TriggerType:DowngradeRegion;
- out_msg.addr := address;
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- TBEs.allocate(getRegionBase(address));
- set_tbe(getTBE(address));
- if (is_valid(cache_entry)) {
- tbe.Owner := cache_entry.Owner;
- tbe.Sharers := cache_entry.Sharers;
- tbe.AllAcksReceived := true; // assume no acks are required
- }
- tbe.ProbeRequestTime := curCycle();
- peek(requestNetwork_in, CPURequestMsg) {
- tbe.InitialRequestTime := in_msg.InitialRequestTime;
- tbe.DemandAddress := in_msg.addr;
- }
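- // NumValidBlocks is filled in later by sns_setNumAcksSharers /
- // sno_setNumAcksOne; the comment trail below may print it unset.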
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- APPEND_TRANSITION_COMMENT(" Acks left ");
- APPEND_TRANSITION_COMMENT(tbe.NumValidBlocks);
- APPEND_TRANSITION_COMMENT(" Owner, ");
- APPEND_TRANSITION_COMMENT(tbe.Owner);
- APPEND_TRANSITION_COMMENT(" sharers, ");
- APPEND_TRANSITION_COMMENT(tbe.Sharers);
- }
-
- action(ss_setSharers, "ss", desc="Add requestor to sharers") {
- peek(requestNetwork_in, CPURequestMsg) {
- cache_entry.Sharers.add(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
- }
- }
-
- action(rs_removeSharer, "rs", desc="Remove requestor from sharers") {
- peek(requestNetwork_in, CPURequestMsg) {
- cache_entry.Sharers.remove(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" removing ");
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- APPEND_TRANSITION_COMMENT(" sharers ");
- APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
- }
- }
-
- action(rsr_removeSharerResponse, "rsr", desc="Remove responder from sharers") {
- peek(responseNetwork_in, ResponseMsg) {
- cache_entry.Sharers.remove(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
- }
- }
-
- action(cs_clearSharers, "cs", desc="Clear the sharers list") {
- cache_entry.Sharers.clear();
- }
-
- action(so_setOwner, "so", desc="Set the owner to the requestor") {
- peek(requestNetwork_in, CPURequestMsg) {
- cache_entry.Owner := in_msg.Requestor;
- APPEND_TRANSITION_COMMENT(" Owner now: ");
- APPEND_TRANSITION_COMMENT(cache_entry.Owner);
- }
- }
-
- action(rr_removeRequestorFromTBE, "rr", desc="Remove requestor from TBE sharers") {
- peek(requestNetwork_in, CPURequestMsg) {
- tbe.Sharers.remove(in_msg.Requestor);
- }
- }
-
- action(ur_updateDirtyStatusOnRequest, "ur", desc="Update dirty status on demand request") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (is_valid(cache_entry)) {
- if ((in_msg.Type == CoherenceRequestType:SharedRequest) &&
- (cache_entry.Sharers.isElement(in_msg.Requestor) == false)) {
- cache_entry.LastWritten := false;
- if (isCpuMachine(in_msg.Requestor)) {
- cache_entry.LastWrittenByCpu := false;
- } else {
- cache_entry.LastWrittenByGpu := false;
- }
- } else if ((in_msg.Type == CoherenceRequestType:PrivateRequest) ||
- (in_msg.Type == CoherenceRequestType:UpgradeRequest)) {
- cache_entry.LastWritten := true;
- if (isCpuMachine(in_msg.Requestor)) {
- cache_entry.LastWrittenByCpu := true;
- } else {
- cache_entry.LastWrittenByGpu := true;
- }
- }
- }
- }
- }
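-
- // These LastWritten* bits feed symMigrate/asymMigrate in the request port
- // above, steering whether ownership migrates on the next shared request.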
-
- action(ud_updateDirtyStatusWithWb, "ud", desc="Update dirty status on writeback") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (is_valid(cache_entry) && in_msg.Dirty) {
- cache_entry.LastWritten := true;
- if (isCpuMachine(in_msg.Requestor)) {
- cache_entry.LastWrittenByCpu := true;
- } else {
- cache_entry.LastWrittenByGpu := true;
- }
- }
- }
- }
-
- action(sns_setNumAcksSharers, "sns", desc="Set number of acks to one per sharing region buffer") {
- assert(is_valid(tbe));
- assert(is_valid(cache_entry));
- tbe.NumValidBlocks := tbe.Sharers.count();
- }
-
- action(sno_setNumAcksOne, "sno", desc="Set number of acks to exactly one") {
- assert(is_valid(tbe));
- assert(is_valid(cache_entry));
- tbe.NumValidBlocks := 1;
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- TBEs.deallocate(getRegionBase(address));
- APPEND_TRANSITION_COMMENT(" reg: ");
- APPEND_TRANSITION_COMMENT(getRegionBase(address));
- unset_tbe();
- }
-
- action(wb_sendWbNotice, "wb", desc="Send notice to cache that writeback is acknowledged") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(notifyNetwork_out, CPURequestMsg, 1) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceRequestType:WbNotify;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- }
- }
- }
-
- action(wbn_sendWbNoticeNoAck, "wbn", desc="Send notice to cache that writeback is acknowledged (no ack needed)") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(notifyNetwork_out, CPURequestMsg, 1) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceRequestType:WbNotify;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.NoAckNeeded := true;
- }
- }
- }
-
- action(b_sendPrivateNotice, "b", desc="Send notice to private cache that it has private access") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(notifyNetwork_out, CPURequestMsg, 1) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceRequestType:PrivateNotify;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- }
- }
- }
-
- action(bs_sendSharedNotice, "bs", desc="Send notice to private cache that it has shared access") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(notifyNetwork_out, CPURequestMsg, 1) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceRequestType:SharedNotify;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- }
- }
- }
-
- action(c_sendSharedNoticeToOrigReq, "c", desc="Send notice to private cache that it has shared access") {
- assert(is_valid(tbe));
- enqueue(notifyNetwork_out, CPURequestMsg, 1) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceRequestType:SharedNotify;
- out_msg.Destination.add(tbe.Owner);
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestTime;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- APPEND_TRANSITION_COMMENT("dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
-
- action(sp_sendPrivateNoticeToOrigReq, "sp", desc="Send notice to private cache that it has private access") {
- assert(is_valid(tbe));
- enqueue(notifyNetwork_out, CPURequestMsg, 1) {
- out_msg.addr := getRegionBase(address);
- out_msg.Type := CoherenceRequestType:PrivateNotify;
- out_msg.Destination.add(tbe.Owner);
- out_msg.Requestor := machineID;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestTime;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- APPEND_TRANSITION_COMMENT("dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
-
- action(i_RegionInvNotify, "i", desc="Send notice to private cache that it no longer has private access") {
- enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.DemandAddress := tbe.DemandAddress;
- //out_msg.Requestor := tbe.Requestor;
- out_msg.Requestor := machineID;
- out_msg.Type := ProbeRequestType:PrbInv;
- //Fix me: assert(tbe.Sharers.count() > 0);
- out_msg.DemandRequest := true;
- out_msg.Destination := tbe.Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- APPEND_TRANSITION_COMMENT("dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
-
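- // Unlike i_RegionInvNotify above, this eviction-path invalidate
- // carries no demand address or demand flag: the region is being
- // dropped, not taken over by a new request.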
- action(i0_RegionInvNotifyDemand0, "i0", desc="Send notice to private cache that it no longer has private access") {
- enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
- out_msg.addr := address;
- // Demand address should default to 0 -> out_msg.DemandAddress := 0;
- out_msg.Requestor := machineID;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.Destination := tbe.Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- APPEND_TRANSITION_COMMENT("dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
-
- action(rd_RegionDowngrade, "rd", desc="Send notice to private cache that it only has shared access") {
- enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
- out_msg.addr := address;
- out_msg.DemandAddress := tbe.DemandAddress;
- out_msg.Requestor := machineID;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.DemandRequest := true;
- out_msg.Destination := tbe.Sharers;
- out_msg.MessageSize := MessageSizeType:Request_Control;
- APPEND_TRANSITION_COMMENT("dest: ");
- APPEND_TRANSITION_COMMENT(out_msg.Destination);
- }
- }
-
- action(p_popRequestQueue, "p", desc="Pop the request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="Pop the trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="Pop the response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(s_stallAndWaitRequest, "s", desc="Stall and wait on the region address") {
- Addr regAddr := getRegionBase(address);
- stall_and_wait(requestNetwork_in, regAddr);
- }
-
- action(w_wakeUpRegionDependents, "w", desc="Wake up any requests waiting for this region") {
- wakeUpBuffers(getRegionBase(address));
- }
-
- action(wa_wakeUpAllDependents, "wa", desc="Wake up all waiting requests") {
- wakeUpAllBuffers();
- }
-
- action(zz_recycleRequestQueue, "\z", desc="...") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(z_stall, "z", desc="stall request queue") {
- // fake state
- }
-
- action(mru_setMRU, "mru", desc="set MRU") {
- cacheMemory.setMRU(address);
- }
-
- // Transitions
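- // Transient-state naming: X_Y reads "moving from X to Y". A trailing
- // _W means the controller is waiting for a DirReadyAck before the
- // invalidate/downgrade probes are triggered.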
-
- transition({NP_P, P_P, NP_S, S_S, S_P, P_S, P_NP, S_AP, P_AS, P_AP, SP_NP_W, S_W, P_AP_W, P_AS_W, S_AP_W}, {PrivateRequest, SharedRequest, UpgradeRequest, SendInv, SendUpgrade, SendDowngrade, CleanWbRequest, CleanWbRequest_LastSharer, StaleCleanWbRequest}) {
- s_stallAndWaitRequest
- }
-
- transition({NP_P, P_P, NP_S, S_S, S_P, S_W, P_S, P_NP, S_AP, P_AS, P_AP, P_AP_W, P_AS_W, S_AP_W}, Evict) {
- zz_recycleRequestQueue;
- }
-
- transition(NP, {PrivateRequest, SendUpgrade}, NP_P) {TagArrayRead, TagArrayWrite} {
- a_allocateRegionEntry;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDir;
- b_sendPrivateNotice;
- so_setOwner;
- ss_setSharers;
- t_allocateTBE;
- p_popRequestQueue;
- }
-
- transition(P, {PrivateRequest, UpgradeRequest}, P_P) {TagArrayRead} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDir;
- b_sendPrivateNotice;
- t_allocateTBE;
- p_popRequestQueue;
- }
-
- transition({NP_P, P_P}, CPUPrivateAck, P) {
- dt_deallocateTBE;
- w_wakeUpRegionDependents;
- pr_popResponseQueue;
- }
-
- transition({NP, P, S}, StaleCleanWbRequest) {TagArrayRead, TagArrayWrite} {
- wbn_sendWbNoticeNoAck;
- ud_updateDirtyStatusWithWb;
- p_popRequestQueue;
- }
-
- transition(NP, SharedRequest, NP_S) {TagArrayRead, TagArrayWrite} {
- a_allocateRegionEntry;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirShared;
- bs_sendSharedNotice;
- so_setOwner;
- ss_setSharers;
- t_allocateTBE;
- p_popRequestQueue;
- }
-
- // Could probably do this in parallel with other shared requests
- transition(S, SharedRequest, S_S) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirShared;
- bs_sendSharedNotice;
- ss_setSharers;
- t_allocateTBE;
- p_popRequestQueue;
- }
-
- transition({P, S}, CleanWbRequest_LastSharer, SP_NP_W) {TagArrayRead, TagArrayWrite} {
- ud_updateDirtyStatusWithWb;
- wb_sendWbNotice;
- rs_removeSharer;
- t_allocateTBE;
- d_deallocateRegionEntry;
- p_popRequestQueue;
- }
-
- transition(S, CleanWbRequest, S_W) {TagArrayRead, TagArrayWrite} {
- ud_updateDirtyStatusWithWb;
- wb_sendWbNotice;
- rs_removeSharer;
- t_allocateTBE;
- p_popRequestQueue;
- }
-
- transition(SP_NP_W, WritebackAck, NP) {
- dt_deallocateTBE;
- w_wakeUpRegionDependents;
- pr_popResponseQueue;
- }
-
- transition(S_W, WritebackAck, S) {
- dt_deallocateTBE;
- w_wakeUpRegionDependents;
- pr_popResponseQueue;
- }
-
- transition({NP_S, S_S}, CPUPrivateAck, S) {
- dt_deallocateTBE;
- w_wakeUpRegionDependents;
- pr_popResponseQueue;
- }
-
- transition(S, UpgradeRequest, S_P) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDir;
- b_sendPrivateNotice;
- so_setOwner;
- t_allocateTBE;
- p_popRequestQueue;
- }
-
- transition(S_P, CPUPrivateAck, P) {
- dt_deallocateTBE;
- w_wakeUpRegionDependents;
- pr_popResponseQueue;
- }
-
- transition(P, SendInv, P_AP_W) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirWithAck;
- so_setOwner;
- t_allocateTBE;
- rr_removeRequestorFromTBE;
- sns_setNumAcksSharers;
- cs_clearSharers;
- ss_setSharers;
- //i_RegionInvNotify;
- p_popRequestQueue;
- }
-
- transition({P_AP_W, S_AP_W}, DirReadyAck) {
- ti_triggerInv;
- pr_popResponseQueue;
- }
-
- transition(P_AS_W, DirReadyAck) {
- td_triggerDowngrade;
- pr_popResponseQueue;
- }
-
- transition(P_AS_W, TriggerDowngrade, P_AS) {
- rd_RegionDowngrade;
- pt_popTriggerQueue;
- }
-
- transition(P_AP_W, TriggerInv, P_AP) {
- i_RegionInvNotify;
- pt_popTriggerQueue;
- }
-
- transition(S_AP_W, TriggerInv, S_AP) {
- i_RegionInvNotify;
- pt_popTriggerQueue;
- }
-
- transition(P, SendUpgrade, P_AP_W) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirWithAck;
- so_setOwner;
- t_allocateTBE;
- rr_removeRequestorFromTBE;
- sns_setNumAcksSharers;
- cs_clearSharers;
- ss_setSharers;
- p_popRequestQueue;
- }
-
- transition(P, Evict, P_NP) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- sns_setNumAcksSharers;
- i0_RegionInvNotifyDemand0;
- d_deallocateRegionEntry;
- }
-
- transition(S, SendInv, P_AP_W) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirWithAck;
- so_setOwner;
- t_allocateTBE;
- rr_removeRequestorFromTBE;
- sns_setNumAcksSharers;
- cs_clearSharers;
- ss_setSharers;
- p_popRequestQueue;
- }
-
- transition(S, Evict, P_NP) {TagArrayRead, TagArrayWrite} {
- t_allocateTBE;
- sns_setNumAcksSharers;
- i0_RegionInvNotifyDemand0;
- d_deallocateRegionEntry;
- }
-
- transition(P_NP, LastAck, NP) {
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pt_popTriggerQueue;
- }
-
- transition(S, SendUpgrade, S_AP_W) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirWithAck;
- so_setOwner;
- t_allocateTBE;
- rr_removeRequestorFromTBE;
- sns_setNumAcksSharers;
- cs_clearSharers;
- ss_setSharers;
- p_popRequestQueue;
- }
-
- transition(S_AP, LastAck, S_P) {
- sp_sendPrivateNoticeToOrigReq;
- pt_popTriggerQueue;
- }
-
- transition(P_AP, LastAck, P_P) {
- sp_sendPrivateNoticeToOrigReq;
- pt_popTriggerQueue;
- }
-
- transition(P, SendDowngrade, P_AS_W) {TagArrayRead, TagArrayWrite} {
- mru_setMRU;
- ur_updateDirtyStatusOnRequest;
- f_fwdReqToDirWithAckShared;
- so_setOwner;
- t_allocateTBE;
- sns_setNumAcksSharers;
- ss_setSharers; // why do we set the sharers before sending the downgrade? Are we sending a downgrade to the requestor?
- p_popRequestQueue;
- }
-
- transition(P_AS, LastAck, P_S) {
- c_sendSharedNoticeToOrigReq;
- pt_popTriggerQueue;
- }
-
- transition(P_S, CPUPrivateAck, S) {
- dt_deallocateTBE;
- w_wakeUpRegionDependents;
- pr_popResponseQueue;
- }
-
- transition({P_NP, P_AS, S_AP, P_AP}, InvAckCore) {} {
- ra_receiveAck;
- pr_popResponseQueue;
- }
-
- transition({P_NP, S_AP, P_AP}, InvAckCoreNoShare) {} {
- ra_receiveAck;
- pr_popResponseQueue;
- }
-
- transition(P_AS, InvAckCoreNoShare) {} {
- ra_receiveAck;
- rsr_removeSharerResponse;
- pr_popResponseQueue;
- }
-
-}
-
-
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-machine(MachineType:Directory, "AMD Baseline protocol")
-: DirectoryMemory * directory;
- CacheMemory * L3CacheMemory;
- Cycles response_latency := 5;
- Cycles l3_hit_latency := 50;
- bool noTCCdir := "False";
- bool CPUonly := "False";
- int TCC_select_num_bits;
- bool useL3OnWT := "False";
- Cycles to_memory_controller_latency := 1;
-
- // From the Cores
- MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request";
- MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response";
- MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock";
-
- MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request";
- MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response";
-
- MessageBuffer * triggerQueue;
- MessageBuffer * L3triggerQueue;
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_U") {
- U, AccessPermission:Backing_Store, desc="unblocked";
- BL, AccessPermission:Busy, desc="got L3 WB request";
- // BL is Busy because the data may exist only in flight in the
- // network as a writeback: the L3 may have sent it and moved on,
- // possibly to the I state.
- BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
- BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B, AccessPermission:Backing_Store, desc="sent response, Blocked til ack";
- }
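-
- // Suffix convention for the blocked states: _PM waits on probes and
- // memory, _Pm waits on probes with memory data already in hand, and
- // _M waits on memory with all probe acks collected.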
-
- // Events
- enumeration(Event, desc="Directory events") {
- // CPU requests
- RdBlkS, desc="...";
- RdBlkM, desc="...";
- RdBlk, desc="...";
- CtoD, desc="...";
- WriteThrough, desc="WriteThrough Message";
- Atomic, desc="Atomic Message";
-
- // writebacks
- VicDirty, desc="...";
- VicClean, desc="...";
- CPUData, desc="WB data from CPU";
- StaleWB, desc="Notification that WB has been superseded by a probe";
-
- // probe responses
- CPUPrbResp, desc="Probe Response Msg";
-
- ProbeAcksComplete, desc="Probe Acks Complete";
-
- L3Hit, desc="Hit in L3 return data to core";
-
- // Memory Controller
- MemData, desc="Fetched data from memory arrives";
- WBAck, desc="Writeback Ack from memory arrives";
-
- CoreUnblock, desc="Core received data, unblock";
- UnblockWriteThrough, desc="Unblock because of writethrough request finishing";
-
- StaleVicDirty, desc="Core invalidated before VicDirty processed";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- L3DataArrayRead, desc="Read the data array";
- L3DataArrayWrite, desc="Write the data array";
- L3TagArrayRead, desc="Read the tag array";
- L3TagArrayWrite, desc="Write the tag array";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore";
- }
-
- structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
- DataBlock DataBlk, desc="data for the block";
- MachineID LastSender, desc="Mach which this block came from";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, desc="Is the data dirty?";
- int NumPendingAcks, desc="num acks expected";
- MachineID OriginalRequestor, desc="Original Requestor";
- MachineID WTRequestor, desc="WT Requestor";
- bool Cached, desc="data hit in Cache";
- bool MemData, desc="Got MemData?",default="false";
- bool wtData, desc="Got write through data?",default="false";
- bool atomicData, desc="Got Atomic op?",default="false";
- Cycles InitialRequestTime, desc="...";
- Cycles ForwardRequestTime, desc="...";
- Cycles ProbeRequestStartTime, desc="...";
- MachineID LastSender, desc="Mach which this block came from";
- bool L3Hit, default="false", desc="Was this an L3 hit?";
- uint64_t probe_id, desc="probe id for lifetime profiling";
- WriteMask writeMask, desc="outstanding write through mask";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
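- // The directory backs all of memory, so a failed lookup simply means
- // the entry has not been touched yet; allocate it on demand.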
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
-
- if (is_valid(dir_entry)) {
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if (is_valid(tbe) && tbe.MemData) {
- DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
- return tbe.DataBlk;
- }
- DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
- return getDirectoryEntry(addr).DataBlk;
- }
-
- State getState(TBE tbe, CacheEntry entry, Addr addr) {
- return getDirectoryEntry(addr).DirectoryState;
- }
-
- void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
- getDirectoryEntry(addr).DirectoryState := state;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if (is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if (is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes
- + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- // For this directory, all permissions are tracked in the Directory
- // itself: since a block cannot be in a TBE without also being in
- // the Dir, state is kept in one place.
- if (directory.isPresent(addr)) {
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(CacheEntry entry, Addr addr, State state) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L3DataArrayRead) {
- L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L3DataArrayWrite) {
- L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L3TagArrayRead) {
- L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L3TagArrayWrite) {
- L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L3DataArrayRead) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L3DataArrayWrite) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L3TagArrayRead) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L3TagArrayWrite) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- // ** OUT_PORTS **
- out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
- out_port(responseNetwork_out, ResponseMsg, responseToCore);
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
-
- // ** IN_PORTS **
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == TriggerType:AcksComplete) {
- trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
- trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
- } else {
- error("Unknown trigger msg");
- }
- }
- }
- }
-
- in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=4) {
- if (L3TriggerQueue_in.isReady(clockEdge())) {
- peek(L3TriggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == TriggerType:L3Hit) {
- trigger(Event:L3Hit, in_msg.addr, entry, tbe);
- } else {
- error("Unknown trigger msg");
- }
- }
- }
- }
-
- // Unblock Network
- in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=3) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, UnblockMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
- }
- }
- }
-
- // Core response network
- in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=2) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
- trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:CPUData) {
- trigger(Event:CPUData, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
- trigger(Event:StaleWB, in_msg.addr, entry, tbe);
- } else {
- error("Unexpected response type");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=1) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:MemData, in_msg.addr, entry, tbe);
- DPRINTF(RubySlicc, "%s\n", in_msg);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:WBAck, in_msg.addr, entry, tbe); // WBAcks are ignored; nothing depends on them
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:Atomic) {
- trigger(Event:Atomic, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
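- // A core invalidated by a probe after issuing its victim writeback
- // is listed in VicDirtyIgnore; its stale VicDirty is sunk with a
- // WBAck instead of being processed.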
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
- } else {
- DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
- trigger(Event:VicDirty, in_msg.addr, entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
- } else {
- DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
- trigger(Event:VicClean, in_msg.addr, entry, tbe);
- }
- } else {
- error("Bad request message type");
- }
- }
- }
- }
-
- // Actions
- action(s_sendResponseS, "s", desc="send Shared response") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Cached) {
- out_msg.State := CoherenceState:Shared;
- } else {
- out_msg.State := CoherenceState:Exclusive;
- }
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(m_sendResponseM, "m", desc="send Modified response") {
- if (tbe.wtData) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- } else {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := false;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- if (tbe.atomicData) {
- out_msg.WTRequestor := tbe.WTRequestor;
- }
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- if (tbe.atomicData) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- }
- }
- }
-
- action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := true;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(l_queueMemWBReq, "lq", desc="Write WB data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- queueMemoryWrite(machineID, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(l_queueMemRdReq, "lr", desc="Read data from memory") {
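- // The L3 is checked first and acts as a victim cache: on a hit the
- // block moves into the TBE, the L3 copy is dropped, and the memory
- // read is skipped.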
- peek(requestNetwork_in, CPURequestMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- if (tbe.Dirty == false) {
- tbe.DataBlk := entry.DataBlk;
- }
- tbe.LastSender := entry.LastSender;
- tbe.L3Hit := true;
- tbe.MemData := true;
- L3CacheMemory.deallocate(address);
- } else {
- queueMemoryRead(machineID, address, to_memory_controller_latency);
- }
- }
- }
-
- action(dc_probeInvCoreData, "dc", desc="probe inv cores, return data") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
-
- // add relevant TCC node to list. This replaces all TCPs and SQCs
- if (((in_msg.Type == CoherenceRequestType:WriteThrough ||
- in_msg.Type == CoherenceRequestType:Atomic) &&
- in_msg.NoWriteConflict) ||
- CPUonly) {
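- // Nothing to add: these cases need no TCC probe.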
- } else if (noTCCdir) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- } else {
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- }
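- // Never probe the requestor; it is the one waiting for this data.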
- out_msg.Destination.remove(in_msg.Requestor);
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" dc: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
- peek(requestNetwork_in, CPURequestMsg) { // not the right network?
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
- // add relevant TCC node to the list. This replaces all TCPs and SQCs
- if (noTCCdir || CPUonly) {
- // No need to notify the TCC about reads
- } else {
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:TCCdir,
- TCC_select_low_bit, TCC_select_num_bits));
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- }
- if (noTCCdir && !CPUonly) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- }
- out_msg.Destination.remove(in_msg.Requestor);
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
- peek(requestNetwork_in, CPURequestMsg) { // not the right network?
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := false;
- out_msg.MessageSize := MessageSizeType:Control;
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
-
- // add relevant TCC node to the list. This replaces all TCPs and SQCs
- if (noTCCdir && !CPUonly) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- } else {
- if (!noTCCdir) {
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:TCCdir,
- TCC_select_low_bit,
- TCC_select_num_bits));
- }
- }
- out_msg.Destination.remove(in_msg.Requestor);
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(d_writeDataToMemory, "d", desc="Write data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
- if (tbe.Dirty == false) {
- // have to update the TBE, too, because of how this
- // directory deals with functional writes
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- peek(requestNetwork_in, CPURequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.wtData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- if (in_msg.Type == CoherenceRequestType:Atomic) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.atomicData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
- tbe.Dirty := false;
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
- tbe.Dirty := true;
- }
- tbe.OriginalRequestor := in_msg.Requestor;
- tbe.NumPendingAcks := 0;
- tbe.Cached := in_msg.ForceShared;
- tbe.InitialRequestTime := in_msg.InitialRequestTime;
- }
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- if (tbe.Dirty == false) {
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- }
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(wd_writeBackData, "wd", desc="Write back data if needed") {
- if (tbe.wtData) {
- getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.writeMask);
- } else if (tbe.atomicData) {
- tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask);
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- } else if (tbe.Dirty == false) {
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- }
- }
-
- action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
- peek(memQueue_in, MemoryMsg) {
- if (tbe.wtData == true) {
- // Do nothing: the TBE already holds the merged write-through
- // data and must not be overwritten by memory data.
- } else if (tbe.Dirty == false) {
- tbe.DataBlk := getDirectoryEntry(address).DataBlk;
- }
- tbe.MemData := true;
- }
- }
-
- action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- if (tbe.wtData) {
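- // Overlay the write-through bytes on the probed copy so the
- // requestor's partial write wins, then mark the block fully
- // written.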
- DataBlock tmp := in_msg.DataBlk;
- tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
- tbe.DataBlk := tmp;
- tbe.writeMask.fillMask();
- } else if (tbe.Dirty) {
- if(tbe.atomicData == false && tbe.wtData == false) {
- DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
- assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
- }
- } else {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- tbe.LastSender := in_msg.Sender;
- }
- }
- if (in_msg.Hit) {
- tbe.Cached := true;
- }
- }
- }
-
- action(mwc_markSinkWriteCancel, "mwc", desc="Mark to sink impending VicDirty") {
- peek(responseNetwork_in, ResponseMsg) {
- getDirectoryEntry(address).VicDirtyIgnore.add(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" setting bit to sink VicDirty ");
- }
- }
-
- action(x_decrementAcks, "x", desc="decrement Acks pending") {
- tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
- APPEND_TRANSITION_COMMENT(" Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(o_checkForCompletion, "o", desc="check for ack completion") {
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
- peek(requestNetwork_in, CPURequestMsg) {
- getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
- }
- }
-
- action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
- peek(responseNetwork_in, ResponseMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- } else {
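- // If the set is full, write the victim back to memory before
- // allocating.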
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- }
- }
- }
-
- action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
- if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- }
- }
- }
-
- action(sf_setForwardReqTime, "sf", desc="...") {
- tbe.ForwardRequestTime := curCycle();
- }
-
- action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
- L3CacheMemory.deallocate(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(pm_popMemQueue, "pm", desc="pop mem queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
- L3TriggerQueue_in.dequeue(clockEdge());
- }
-
- action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(zz_recycleRequestQueue, "zz", desc="recycle request queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
- responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
- stall_and_wait(requestNetwork_in, address);
- }
-
- action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
- wakeUpBuffers(address);
- }
-
- action(wa_wakeUpAllDependents, "waa", desc="Wake up all waiting requests") {
- wakeUpAllBuffers();
- }
-
- action(z_stall, "z", desc="...") {
- }
-
- // TRANSITIONS
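- // U is the only stable state: each request moves through the blocked
- // states and returns to U once the core unblocks (or, for BL, once
- // the writeback data arrives).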
- transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, CtoD}) {
- st_stallAndWaitRequest;
- }
-
- // It may be possible to save multiple invalidations here!
- transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Atomic, WriteThrough}) {
- st_stallAndWaitRequest;
- }
-
-
- // transitions from U
- transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} {
- t_allocateTBE;
- l_queueMemRdReq;
- sc_probeShrCoreData;
- p_popRequestQueue;
- }
-
- transition(U, WriteThrough, BM_PM) {L3TagArrayRead, L3TagArrayWrite} {
- t_allocateTBE;
- w_sendResponseWBAck;
- l_queueMemRdReq;
- dc_probeInvCoreData;
- p_popRequestQueue;
- }
-
- transition(U, Atomic, BM_PM) {L3TagArrayRead, L3TagArrayWrite} {
- t_allocateTBE;
- l_queueMemRdReq;
- dc_probeInvCoreData;
- p_popRequestQueue;
- }
-
- transition(U, {RdBlkM}, BM_PM) {L3TagArrayRead} {
- t_allocateTBE;
- l_queueMemRdReq;
- dc_probeInvCoreData;
- p_popRequestQueue;
- }
-
- transition(U, RdBlk, B_PM) {L3TagArrayRead}{
- t_allocateTBE;
- l_queueMemRdReq;
- sc_probeShrCoreData;
- p_popRequestQueue;
- }
-
- transition(U, CtoD, BP) {L3TagArrayRead} {
- t_allocateTBE;
- ic_probeInvCore;
- p_popRequestQueue;
- }
-
- transition(U, VicDirty, BL) {L3TagArrayRead} {
- t_allocateTBE;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(U, VicClean, BL) {L3TagArrayRead} {
- t_allocateTBE;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition(BL, {VicDirty, VicClean}) {
- zz_recycleRequestQueue;
- }
-
- transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
- d_writeDataToMemory;
- al_allocateL3Block;
- wa_wakeUpDependents;
- dt_deallocateTBE;
- pr_popResponseQueue;
- }
-
- transition(BL, StaleWB, U) {L3TagArrayWrite} {
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pr_popResponseQueue;
- }
-
- transition({B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm}, {VicDirty, VicClean}) {
- z_stall;
- }
-
- transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, WBAck) {
- pm_popMemQueue;
- }
-
- transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, StaleVicDirty) {
- rv_removeVicDirtyIgnore;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition({B}, CoreUnblock, U) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition(B, UnblockWriteThrough, U) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(BS_PM, MemData, BS_Pm) {} {
- mt_writeMemDataToTBE;
- pm_popMemQueue;
- }
-
- transition(BM_PM, MemData, BM_Pm){} {
- mt_writeMemDataToTBE;
- pm_popMemQueue;
- }
-
- transition(B_PM, MemData, B_Pm){} {
- mt_writeMemDataToTBE;
- pm_popMemQueue;
- }
-
- transition(BS_PM, L3Hit, BS_Pm) {} {
- ptl_popTriggerQueue;
- }
-
- transition(BM_PM, L3Hit, BM_Pm) {} {
- ptl_popTriggerQueue;
- }
-
- transition(B_PM, L3Hit, B_Pm) {} {
- ptl_popTriggerQueue;
- }
-
- transition(BS_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(BM_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(B_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BP}, CPUPrbResp) {
- y_writeProbeDataToTBE;
- x_decrementAcks;
- o_checkForCompletion;
- pr_popResponseQueue;
- }
-
- transition(BS_PM, ProbeAcksComplete, BS_M) {} {
- sf_setForwardReqTime;
- pt_popTriggerQueue;
- }
-
- transition(BM_PM, ProbeAcksComplete, BM_M) {} {
- sf_setForwardReqTime;
- pt_popTriggerQueue;
- }
-
- transition(B_PM, ProbeAcksComplete, B_M){} {
- sf_setForwardReqTime;
- pt_popTriggerQueue;
- }
-
- transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BP, ProbeAcksComplete, B){L3TagArrayWrite} {
- sf_setForwardReqTime;
- c_sendResponseCtoD;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu
- */
-
-
-enumeration(CoherenceRequestType, desc="Coherence Request Types") {
- // CPU Request Types ONLY
- RdBlk, desc="Read Blk";
- RdBlkM, desc="Read Blk Modified";
- RdBlkS, desc="Read Blk Shared";
- CtoD, desc="Change To Dirty";
- VicClean, desc="L2 clean eviction";
- VicDirty, desc="L2 dirty eviction";
- Atomic, desc="Upper level atomic";
- AtomicWriteBack, desc="Upper level atomic";
- WriteThrough, desc="Ordered WriteThrough w/Data";
- WriteThroughFifo, desc="WriteThrough with no data";
- WriteThroughDummy, desc="WriteThrough with no data for atomic operation";
- WriteFlush, desc="Release Flush";
-
- WrCancel, desc="want to cancel WB to Memory"; // should this be here?
-
- WBApproval, desc="WB Approval";
-
- // Messages between Dir and R-Dir
- ForceInv, desc="Send invalidate to the block";
- ForceDowngrade, desc="Send downgrade to the block";
- Unblock, desc="Used to let the dir know a message has been sunk";
-
- // Messages between R-Dir and R-Buffer
- PrivateNotify, desc="Let region buffer know it has private access";
- SharedNotify, desc="Let region buffer know it has shared access";
- WbNotify, desc="Let region buffer know it saw its wb request";
- Downgrade, desc="Force the region buffer to downgrade to shared";
- // Response to R-Dir (probably should be on a different network, but
- // I need it to be ordered with respect to requests)
- InvAck, desc="Let the R-Dir know when the inv has occurred";
-
- PrivateRequest, desc="R-buf wants the region in private";
- UpgradeRequest, desc="R-buf wants to upgrade the region from shared to private";
- SharedRequest, desc="R-buf wants the region in shared (could respond with private)";
- CleanWbRequest, desc="R-buf wants to deallocate clean region";
-
- NA, desc="So we don't get segfaults";
-}
-
-enumeration(ProbeRequestType, desc="Probe Request Types") {
- PrbDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
- PrbInv, desc="Probe to Invalidate";
-
- // For regions
- PrbRepl, desc="Force the cache to do a replacement";
- PrbRegDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
- PrbAtomic, desc="Forwarded Atomic Operation";
-}
-
-
-enumeration(CoherenceResponseType, desc="Coherence Response Types") {
- NBSysResp, desc="Northbridge response to CPU Rd request";
- NBSysWBAck, desc="Northbridge response ok to WB";
- TDSysResp, desc="TCCdirectory response to CPU Rd request";
- TDSysWBAck, desc="TCCdirectory response ok to WB";
- TDSysWBNack, desc="TCCdirectory response ok to drop";
- CPUPrbResp, desc="CPU Probe Response";
- CPUData, desc="CPU Data";
- StaleNotif, desc="Notification of Stale WBAck, No data to writeback";
- CPUCancelWB, desc="want to cancel WB to Memory";
- MemData, desc="Data from Memory";
-
- // for regions
- PrivateAck, desc="Ack that r-buf received private notify";
- RegionWbAck, desc="Writeback Ack that r-buf completed deallocation";
- DirReadyAck, desc="Directory (mem ctrl)<->region dir handshake";
-}
-
-enumeration(CoherenceState, default="CoherenceState_NA", desc="Coherence State") {
- Modified, desc="Modified";
- Owned, desc="Owned state";
- Exclusive, desc="Exclusive";
- Shared, desc="Shared";
- NA, desc="NA";
-}
-
-structure(CPURequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- Addr DemandAddress, desc="Physical block address for this request";
- CoherenceRequestType Type, desc="Type of request";
- DataBlock DataBlk, desc="data for the cache line"; // only for WB
- bool Dirty, desc="whether WB data is dirty"; // only for WB
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Multicast destination mask";
- bool Shared, desc="For CPU_WrVicBlk, vic is O not M. For CPU_ClVicBlk, vic is S";
- MessageSizeType MessageSize, desc="size category of the message";
- Cycles InitialRequestTime, desc="time the initial request was sent from the L1Cache";
- Cycles ForwardRequestTime, desc="time the dir forwarded the request";
- Cycles ProbeRequestStartTime, desc="the time the dir started the probe request";
- bool DemandRequest, default="false", desc="For profiling purposes";
-
- NetDest Sharers, desc="Caches that may have a valid copy of the data";
- bool ForceShared, desc="R-dir knows it is shared, pass on so it sends an S copy, not E";
- bool Private, default="false", desc="Requestor already has private permissions, no need for dir check";
- bool CtoDSinked, default="false", desc="This is true if the CtoD previously sent must have been sunk";
-
- bool NoAckNeeded, default="false", desc="True if region buffer doesn't need to ack";
- int Acks, default="0", desc="Acks that the dir (mem ctrl) should expect to receive";
- CoherenceRequestType OriginalType, default="CoherenceRequestType_NA", desc="Type of request from core fwded through region buffer";
- WriteMask writeMask, desc="Write Through Data";
- MachineID WTRequestor, desc="Node who initiated the write through";
- HSAScope scope, default="HSAScope_SYSTEM", desc="Request Scope";
- int wfid, default="0", desc="wavefront id";
- bool NoWriteConflict, default="true", desc="write collided with CAB entry";
- int ProgramCounter, desc="PC that accesses this block";
-
- bool functionalRead(Packet *pkt) {
- // Only dirty-victim (VicDirty) messages carry the data block
- if (Type == CoherenceRequestType:VicDirty) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // read data from those messages that contain the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-structure(NBProbeRequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- ProbeRequestType Type, desc="NB_PrbNxtState signal";
- bool ReturnData, desc="Indicates CPU should return data";
- NetDest Destination, desc="Node to whom the data is sent";
- MessageSizeType MessageSize, desc="size category of the message";
- bool DemandRequest, default="false", desc="demand request, requesting 3-hop transfer";
- Addr DemandAddress, desc="Demand block address for a region request";
- MachineID Requestor, desc="Requestor id for 3-hop requests";
- bool NoAckNeeded, default="false", desc="For short-circuiting acks";
- int ProgramCounter, desc="PC that accesses this block";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Probe requests never carry the data block, so there is
- // nothing to write.
- return false;
- }
-
-}
-
-structure(TDProbeRequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- ProbeRequestType Type, desc="TD_PrbNxtState signal";
- bool ReturnData, desc="Indicates CPU should return data";
- bool localCtoD, desc="Indicates CtoD is within the GPU hierarchy (aka TCC subtree)";
- NetDest Destination, desc="Node to whom the data is sent";
- MessageSizeType MessageSize, desc="size category of the message";
- int Phase, desc="Synchronization Phase";
- int wfid, desc="wavefront id for Release";
- MachineID Requestor, desc="Node who initiated the request";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Probe requests never carry the data block, so there is
- // nothing to write.
- return false;
- }
-}
-
- // Response messages are similar enough to be merged into one type
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="NB Sys Resp or CPU Response to Probe";
- MachineID Sender, desc="Node who sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
- // Begin Used Only By CPU Response
- DataBlock DataBlk, desc="data for the cache line";
- bool Hit, desc="probe hit valid line";
- bool Shared, desc="True if S, or if NB Probe ReturnData==1 && O";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- bool Ntsl, desc="indicates probed line will be invalid after probe";
- bool UntransferredOwner, desc="pending confirmation of ownership change";
- // End Used Only By CPU Response
-
- // Begin NB Response Only
- CoherenceState State, default="CoherenceState_NA", desc="What returned data from NB should be in";
- bool CtoD, desc="was the originator a CtoD?";
- // End NB Response Only
-
- // Normally, if a block is hit by a probe while waiting to be
- // written back, the NbReqShared signal (part of the CPURequest
- // signal group) is flipped. Since that would mean a separate
- // packet here, the signal is sent back with the data instead.
-
- MessageSizeType MessageSize, desc="size category of the message";
- Cycles InitialRequestTime, desc="time the initial request was sent from the L1Cache";
- Cycles ForwardRequestTime, desc="time the dir forwarded the request";
- Cycles ProbeRequestStartTime, desc="the time the dir started the probe request";
- bool DemandRequest, default="false", desc="For profiling purposes";
-
- bool L3Hit, default="false", desc="Did memory or L3 supply the data?";
- MachineID OriginalResponder, desc="Mach which wrote the data to the L3";
- MachineID WTRequestor, desc="Node who started the writethrough";
-
- bool NotCached, default="false", desc="True when the Region buffer has already evicted the line";
-
- bool NoAckNeeded, default="false", desc="For short circuting acks";
- bool isValid, default="false", desc="Is acked block valid";
- int wfid, default="0", desc="wavefront id";
- int Phase, desc="Synchronization Phase";
-
- int ProgramCounter, desc="PC that issues this request";
- bool mispred, desc="tell TCP if the block should not be bypassed";
-
-
- bool functionalRead(Packet *pkt) {
- // Only CPUData and MemData messages contain the data block
- if (Type == CoherenceResponseType:CPUData ||
- Type == CoherenceResponseType:MemData) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // read data from those messages that contain the block
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
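The functionalRead/functionalWrite hooks above lean on Ruby's testAndRead/testAndWrite helpers. A minimal C++ sketch of their intended semantics, with simplified stand-in types (illustrative only; the real helpers operate on gem5's DataBlock and Packet classes):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Block { uint64_t base; uint8_t data[64]; };               // one cache line
    struct Pkt   { uint64_t addr; uint8_t *buf; std::size_t size; }; // functional access

    // testAndRead-style check: if the packet's range falls inside this
    // block, service it from the block's bytes and report success.
    bool testAndRead(const Block &blk, Pkt &pkt)
    {
        if (pkt.addr < blk.base ||
            pkt.addr + pkt.size > blk.base + sizeof(blk.data))
            return false;                      // not contained in this line
        std::memcpy(pkt.buf, blk.data + (pkt.addr - blk.base), pkt.size);
        return true;
    }

Message types that never carry a valid block (probes, unblocks, triggers) simply return false from both hooks, as the structures in this file do.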
-
-structure(UnblockMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- NetDest Destination, desc="Destination (always directory)";
- MessageSizeType MessageSize, desc="size category of the message";
- MachineID Sender, desc="Node who sent the data";
- bool currentOwner, default="false", desc="Is the sender the current owner";
- bool DoneAck, default="false", desc="Is this a done ack?";
- bool Dirty, default="false", desc="Was block dirty when evicted";
- bool wasValid, default="false", desc="Was block valid when evicted";
- bool valid, default="false", desc="Is block valid";
- bool validToInvalid, default="false", desc="Block went from valid to invalid on eviction";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // read data from those messages that contain the block
- return false;
- }
-}
-
-enumeration(TriggerType, desc="Trigger Type") {
- L2_to_L1, desc="L2 to L1 fill";
- AcksComplete, desc="NB received all needed Acks";
-
- // For regions
- InvNext, desc="Invalidate the next block";
- PrivateAck, desc="Loopback ack for machines with no Region Buffer";
- AllOutstanding, desc="All outstanding requests have finished";
- L3Hit, desc="L3 hit in dir";
-
- // For region directory once the directory is blocked
- InvRegion, desc="Invalidate region";
- DowngradeRegion, desc="downgrade region";
- //For writethrough
- UnblockWriteThrough, desc="unblock";
- WriteData, desc="Write to full cacheblock data";
- WriteDone, desc="Sequencer says that write is done";
- AtomicDone, desc="Atomic is done";
-}
-
-enumeration(CacheId, desc="Which Cache in the Core") {
- L1I, desc="L1 I-cache";
- L1D0, desc="L1 D-cache cluster 0";
- L1D1, desc="L1 D-cache cluster 1";
- NA, desc="Default";
-}
-
-structure(TriggerMsg, desc="...", interface="Message") {
- Addr addr, desc="Address";
- TriggerType Type, desc="Type of trigger";
- CacheId Dest, default="CacheId_NA", desc="Cache to invalidate";
- int ProgramCounter, desc="PC that accesses this block";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // read data from those messages that contain the block
- return false;
- }
-
-}
-
-enumeration(FifoType, desc="Fifo Type") {
- WriteDummy, desc="Dummy Write for atomic operation";
- WriteThrough, desc="simple writethrough request";
- WriteFlush, desc="synchronization message";
-}
-
-structure(FifoMsg, desc="...", interface="Message") {
- Addr addr, desc="Address";
- FifoType Type, desc="WriteThrough/WriteFlush";
- int wfid, default="0", desc="wavefront id";
- MachineID Requestor, desc="Flush Requestor";
- MachineID oRequestor, desc="original Flush Requestor";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check on message type required since the protocol should
- // read data from those messages that contain the block
- return false;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * For use for simulation and test purposes only
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Lisa Hsu,
- * Sooraj Puthoor
- */
-
-/*
- * This file is based on MOESI_AMD_Base.sm
- * Differences with AMD base protocol
- * -- Uses a probe filter memory to track sharers.
- * -- The probe filter can be inclusive or non-inclusive
- * -- Only two sharers tracked. Sharers are a) GPU or/and b) CPU
- * -- If sharer information available, the sharer is probed
- * -- If sharer information not available, probes are broadcasted
- */
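A hedged C++ sketch of the sharer-tracking rule described above (names are illustrative, not the gem5 API): the filter tracks at most two sharers, CPU and GPU, and an untracked entry must conservatively be treated as shared by both, which is why the isCPUSharer/isGPUSharer helpers below return true for the NT state.

    enum class PFState { T, NT, B };   // Tracked, Not tracked, Blocked (evicting)

    struct PFEntry { PFState state; bool onCPU; bool onGPU; };

    struct ProbeTargets { bool cpu; bool gpu; };

    // Decide which complexes to probe. With no tracking information (NT)
    // the directory falls back to broadcasting probes to both complexes.
    ProbeTargets sharersToProbe(const PFEntry &e)
    {
        if (e.state == PFState::NT)
            return {true, true};            // no info: broadcast
        return {e.onCPU, e.onGPU};          // tracked: probe recorded sharers
    }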
-
-machine(MachineType:Directory, "AMD Baseline protocol")
-: DirectoryMemory * directory;
- CacheMemory * L3CacheMemory;
- CacheMemory * ProbeFilterMemory;
- Cycles response_latency := 5;
- Cycles l3_hit_latency := 50;
- bool noTCCdir := "False";
- bool CAB_TCC := "False";
- int TCC_select_num_bits:=1;
- bool useL3OnWT := "False";
- bool inclusiveDir := "True";
- Cycles to_memory_controller_latency := 1;
-
- // From the Cores
- MessageBuffer * requestFromCores, network="From", virtual_network="0", ordered="false", vnet_type="request";
- MessageBuffer * responseFromCores, network="From", virtual_network="2", ordered="false", vnet_type="response";
- MessageBuffer * unblockFromCores, network="From", virtual_network="4", ordered="false", vnet_type="unblock";
-
- MessageBuffer * probeToCore, network="To", virtual_network="0", ordered="false", vnet_type="request";
- MessageBuffer * responseToCore, network="To", virtual_network="2", ordered="false", vnet_type="response";
-
- MessageBuffer * triggerQueue, ordered="true";
- MessageBuffer * L3triggerQueue, ordered="true";
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_U") {
- U, AccessPermission:Backing_Store, desc="unblocked";
- BL, AccessPermission:Busy, desc="got L3 WB request";
- // BL is Busy because it is busy waiting for the data
- // which is possibly in the network. The cache which evicted the data
- // might have moved to some other state after doing the eviction
- // BS==> Received a read request; has not requested ownership
- // B==> Received a read request; has requested ownership
- // BM==> Received a modification request
- B_P, AccessPermission:Backing_Store, desc="Back invalidation, waiting for probes";
- BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
- BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
- BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
- BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
- B, AccessPermission:Backing_Store, desc="sent response, blocked until ack";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- // CPU requests
- RdBlkS, desc="...";
- RdBlkM, desc="...";
- RdBlk, desc="...";
- CtoD, desc="...";
- WriteThrough, desc="WriteThrough Message";
- Atomic, desc="Atomic Message";
-
- // writebacks
- VicDirty, desc="...";
- VicClean, desc="...";
- CPUData, desc="WB data from CPU";
- StaleWB, desc="Notification that WB has been superseded by a probe";
-
- // probe responses
- CPUPrbResp, desc="Probe Response Msg";
-
- ProbeAcksComplete, desc="Probe Acks Complete";
-
- L3Hit, desc="Hit in L3 return data to core";
-
- // Replacement
- PF_Repl, desc="Replace address from probe filter";
-
- // Memory Controller
- MemData, desc="Fetched data from memory arrives";
- WBAck, desc="Writeback Ack from memory arrives";
-
- CoreUnblock, desc="Core received data, unblock";
- UnblockWriteThrough, desc="Unblock because of writethrough request finishing";
-
- StaleVicDirty, desc="Core invalidated before VicDirty processed";
- }
-
- enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
- L3DataArrayRead, desc="Read the data array";
- L3DataArrayWrite, desc="Write the data array";
- L3TagArrayRead, desc="Read the tag array";
- L3TagArrayWrite, desc="Write the tag array";
-
- PFTagArrayRead, desc="Read the probe filter tag array";
- PFTagArrayWrite, desc="Write the probe filter tag array";
- }
-
- // TYPES
-
- enumeration(ProbeFilterState, desc="") {
- T, desc="Tracked";
- NT, desc="Not tracked";
- B, desc="Blocked, this entry is being replaced";
- }
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore";
- }
-
- structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
- DataBlock DataBlk, desc="data for the block";
- MachineID LastSender, desc="Mach which this block came from";
- ProbeFilterState pfState, desc="ProbeFilter state",default="Directory_ProbeFilterState_NT";
- bool isOnCPU, desc="Block valid in the CPU complex",default="false";
- bool isOnGPU, desc="Block valid in the GPU complex",default="false";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block";
- bool Dirty, desc="Is the data dirty?";
- int NumPendingAcks, desc="num acks expected";
- MachineID OriginalRequestor, desc="Original Requestor";
- MachineID WTRequestor, desc="WT Requestor";
- bool Cached, desc="data hit in Cache";
- bool MemData, desc="Got MemData?",default="false";
- bool wtData, desc="Got write through data?",default="false";
- bool atomicData, desc="Got Atomic op?",default="false";
- Cycles InitialRequestTime, desc="...";
- Cycles ForwardRequestTime, desc="...";
- Cycles ProbeRequestStartTime, desc="...";
- MachineID LastSender, desc="Mach which this block came from";
- bool L3Hit, default="false", desc="Was this an L3 hit?";
- uint64_t probe_id, desc="probe id for lifetime profiling";
- WriteMask writeMask, desc="outstanding write through mask";
- Addr demandAddress, desc="Address of demand request which caused probe filter eviction";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
-
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
-
- if (is_valid(dir_entry)) {
- //DPRINTF(RubySlicc, "Getting entry %s: %s\n", addr, dir_entry.DataBlk);
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
-
- DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
- TBE tbe := TBEs.lookup(addr);
- if (is_valid(tbe) && tbe.MemData) {
- DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
- return tbe.DataBlk;
- }
- DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
- return getDirectoryEntry(addr).DataBlk;
- }
-
- State getState(TBE tbe, CacheEntry entry, Addr addr) {
- CacheEntry probeFilterEntry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(addr));
- if (inclusiveDir) {
- if (is_valid(probeFilterEntry) && probeFilterEntry.pfState == ProbeFilterState:B) {
- return State:B_P;
- }
- }
- return getDirectoryEntry(addr).DirectoryState;
- }
-
- void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
- getDirectoryEntry(addr).DirectoryState := state;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs.lookup(addr);
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes +
- functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- // For this directory, permissions are tracked solely in the directory
- // state: a block can never be in a TBE but absent from the directory,
- // so state is kept in one place.
- if (directory.isPresent(addr)) {
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(CacheEntry entry, Addr addr, State state) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
-
- void recordRequestType(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L3DataArrayRead) {
- L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
- } else if (request_type == RequestType:L3DataArrayWrite) {
- L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
- } else if (request_type == RequestType:L3TagArrayRead) {
- L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:L3TagArrayWrite) {
- L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- } else if (request_type == RequestType:PFTagArrayRead) {
- ProbeFilterMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
- } else if (request_type == RequestType:PFTagArrayWrite) {
- ProbeFilterMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
- }
- }
-
- bool checkResourceAvailable(RequestType request_type, Addr addr) {
- if (request_type == RequestType:L3DataArrayRead) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L3DataArrayWrite) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
- } else if (request_type == RequestType:L3TagArrayRead) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:L3TagArrayWrite) {
- return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:PFTagArrayRead) {
- return ProbeFilterMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else if (request_type == RequestType:PFTagArrayWrite) {
- return ProbeFilterMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
- } else {
- error("Invalid RequestType type in checkResourceAvailable");
- return true;
- }
- }
-
- bool isNotPresentProbeFilter(Addr address) {
- if (ProbeFilterMemory.isTagPresent(address) ||
- ProbeFilterMemory.cacheAvail(address)) {
- return false;
- }
- return true;
- }
-
- bool isGPUSharer(Addr address) {
- assert(ProbeFilterMemory.isTagPresent(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
- if (entry.pfState == ProbeFilterState:NT) {
- return true;
- } else if (entry.isOnGPU){
- return true;
- }
- return false;
- }
-
- bool isCPUSharer(Addr address) {
- assert(ProbeFilterMemory.isTagPresent(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
- if (entry.pfState == ProbeFilterState:NT) {
- return true;
- } else if (entry.isOnCPU){
- return true;
- }
- return false;
- }
-
-
- // ** OUT_PORTS **
- out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
- out_port(responseNetwork_out, ResponseMsg, responseToCore);
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
-
- // ** IN_PORTS **
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == TriggerType:AcksComplete) {
- trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
- trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
- } else {
- error("Unknown trigger msg");
- }
- }
- }
- }
-
- in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=4) {
- if (L3TriggerQueue_in.isReady(clockEdge())) {
- peek(L3TriggerQueue_in, TriggerMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == TriggerType:L3Hit) {
- trigger(Event:L3Hit, in_msg.addr, entry, tbe);
- } else {
- error("Unknown trigger msg");
- }
- }
- }
- }
-
- // Unblock Network
- in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=3) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, UnblockMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
- }
- }
- }
-
- // Core response network
- in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=2) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
- trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:CPUData) {
- trigger(Event:CPUData, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
- trigger(Event:StaleWB, in_msg.addr, entry, tbe);
- } else {
- error("Unexpected response type");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=1) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:MemData, in_msg.addr, entry, tbe);
- DPRINTF(RubySlicc, "%s\n", in_msg);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, CPURequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
- if (inclusiveDir && isNotPresentProbeFilter(in_msg.addr)) {
- Addr victim := ProbeFilterMemory.cacheProbe(in_msg.addr);
- tbe := TBEs.lookup(victim);
- entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(victim));
- trigger(Event:PF_Repl, victim, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
- trigger(Event:RdBlk, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
- trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
- trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:Atomic) {
- trigger(Event:Atomic, in_msg.addr, entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
- } else {
- DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
- trigger(Event:VicDirty, in_msg.addr, entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
- DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
- trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
- } else {
- DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
- trigger(Event:VicClean, in_msg.addr, entry, tbe);
- }
- } else {
- error("Bad request message type");
- }
- }
- }
- }
-
- // Actions
- action(s_sendResponseS, "s", desc="send Shared response") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Shared;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Cached) {
- out_msg.State := CoherenceState:Shared;
- } else {
- out_msg.State := CoherenceState:Exclusive;
- }
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- // Write-through and atomic requests do not send an unblock ack back to
- // the directory, so the directory has to generate a self-unblocking
- // message. Additionally, a write-through does not require data in its
- // response, so write-through is treated separately from write-back and
- // atomics.
- action(m_sendResponseM, "m", desc="send Modified response") {
- if (tbe.wtData) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- } else {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- if (tbe.L3Hit) {
- out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
- } else {
- out_msg.Sender := machineID;
- }
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Dirty := tbe.Dirty;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := false;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- out_msg.OriginalResponder := tbe.LastSender;
- if(tbe.atomicData){
- out_msg.WTRequestor := tbe.WTRequestor;
- }
- out_msg.L3Hit := tbe.L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- if (tbe.atomicData) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:UnblockWriteThrough;
- }
- }
- }
- }
-
- action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysResp;
- out_msg.Sender := machineID;
- out_msg.Destination.add(tbe.OriginalRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.Dirty := false;
- out_msg.State := CoherenceState:Modified;
- out_msg.CtoD := true;
- out_msg.InitialRequestTime := tbe.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- }
-
- action(w_sendResponseWBAck, "w", desc="send WB Ack") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:NBSysWBAck;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.WTRequestor := in_msg.WTRequestor;
- out_msg.Sender := machineID;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(l_queueMemWBReq, "lq", desc="Write WB data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- queueMemoryWrite(machineID, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(l_queueMemRdReq, "lr", desc="Read data from memory") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L3Hit;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- tbe.DataBlk := entry.DataBlk;
- tbe.LastSender := entry.LastSender;
- tbe.L3Hit := true;
- tbe.MemData := true;
- L3CacheMemory.deallocate(address);
- } else {
- queueMemoryRead(machineID, address, to_memory_controller_latency);
- }
- }
- }
-
- action(dc_probeInvCoreData, "dc", desc="probe inv cores, return data") {
- peek(requestNetwork_in, CPURequestMsg) {
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- if(isCPUSharer(address)) {
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
- }
-
- // add relevant TCC node to list. This replaces all TCPs and SQCs
- if(isGPUSharer(address)) {
- if ((in_msg.Type == CoherenceRequestType:WriteThrough ||
- in_msg.Type == CoherenceRequestType:Atomic) &&
- in_msg.NoWriteConflict) {
- // Don't include TCCs unless there was a write-CAB conflict in the TCC
- } else if(noTCCdir) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- } else {
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
- }
- }
- out_msg.Destination.remove(in_msg.Requestor);
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" dc: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
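The mapAddressToRange call above interleaves blocks across TCC banks using TCC_select_low_bit (the block-offset width, per its default of RubySystem::getBlockSizeBits()) and TCC_select_num_bits. A small sketch of that bank selection, assuming the usual power-of-two interleave:

    #include <cstdint>

    // Pick a TCC bank from the address bits just above the block offset.
    // low_bit is typically log2(block size); num_bits selects among
    // 2^num_bits banks.
    int tccBankFor(uint64_t addr, int low_bit, int num_bits)
    {
        return int((addr >> low_bit) & ((1ull << num_bits) - 1));
    }

    // e.g. with 64B blocks (low_bit = 6) and TCC_select_num_bits = 1,
    // even and odd blocks alternate between the two TCC banks.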
-
- action(bp_backProbe, "bp", desc="back probe") {
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- if(isCPUSharer(address)) {
- // won't be realistic for multisocket
- out_msg.Destination.broadcast(MachineType:CorePair);
- }
- // add relevant TCC node to the list. This replaces all TCPs and SQCs
- if(isGPUSharer(address)) {
- if (noTCCdir) {
- //Don't need to notify TCC about reads
- } else {
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- }
- if (noTCCdir && CAB_TCC) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- }
- }
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" bp: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- APPEND_TRANSITION_COMMENT(" - back probe");
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
-
- action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
- peek(requestNetwork_in, CPURequestMsg) { // not the right network?
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbDowngrade;
- out_msg.ReturnData := true;
- out_msg.MessageSize := MessageSizeType:Control;
- if(isCPUSharer(address)) {
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
- }
- // add relevant TCC node to the list. This replaces all TCPs and SQCs
- if(isGPUSharer(address)) {
- if (noTCCdir) {
- //Don't need to notify TCC about reads
- } else {
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
- tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
- }
- if (noTCCdir && CAB_TCC) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- }
- }
- out_msg.Destination.remove(in_msg.Requestor);
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
- peek(requestNetwork_in, CPURequestMsg) { // not the right network?
- enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := ProbeRequestType:PrbInv;
- out_msg.ReturnData := false;
- out_msg.MessageSize := MessageSizeType:Control;
- if(isCPUSharer(address)) {
- out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
- }
-
- // add relevant TCC node to the list. This replaces all TCPs and SQCs
- if(isGPUSharer(address)) {
- if (noTCCdir) {
- out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
- TCC_select_low_bit, TCC_select_num_bits));
- } else {
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
- }
- }
- out_msg.Destination.remove(in_msg.Requestor);
- tbe.NumPendingAcks := out_msg.Destination.count();
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- tbe.ProbeRequestStartTime := curCycle();
- }
- }
- }
-
- action(sm_setMRU, "sm", desc="set probe filter entry as MRU") {
- ProbeFilterMemory.setMRU(address);
- }
-
- action(d_writeDataToMemory, "d", desc="Write data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Writing Data: %s to address %s\n", in_msg.DataBlk,
- in_msg.addr);
- }
- }
-
- action(te_allocateTBEForEviction, "te", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- tbe.writeMask.clear();
- tbe.wtData := false;
- tbe.atomicData := false;
- tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
- tbe.Dirty := false;
- tbe.NumPendingAcks := 0;
- }
-
- action(t_allocateTBE, "t", desc="allocate TBE Entry") {
- check_allocate(TBEs);
- peek(requestNetwork_in, CPURequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.wtData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- if (in_msg.Type == CoherenceRequestType:Atomic) {
- tbe.writeMask.clear();
- tbe.writeMask.orMask(in_msg.writeMask);
- tbe.atomicData := true;
- tbe.WTRequestor := in_msg.WTRequestor;
- tbe.LastSender := in_msg.Requestor;
- }
- tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
- tbe.Dirty := false;
- if (in_msg.Type == CoherenceRequestType:WriteThrough) {
- tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
- tbe.Dirty := false;
- }
- tbe.OriginalRequestor := in_msg.Requestor;
- tbe.NumPendingAcks := 0;
- tbe.Cached := in_msg.ForceShared;
- tbe.InitialRequestTime := in_msg.InitialRequestTime;
- }
- }
-
- action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
- if (tbe.Dirty == false) {
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- }
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(wd_writeBackData, "wd", desc="Write back data if needed") {
- if (tbe.wtData) {
- DataBlock tmp := getDirectoryEntry(address).DataBlk;
- tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
- tbe.DataBlk := tmp;
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- } else if (tbe.atomicData) {
- tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,
- tbe.writeMask);
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- } else if (tbe.Dirty == false) {
- getDirectoryEntry(address).DataBlk := tbe.DataBlk;
- }
- }
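The copyPartial merges above write only the bytes covered by the write-through mask into the destination block, leaving the rest intact. A simplified C++ sketch of that semantics (types are stand-ins for gem5's DataBlock and WriteMask):

    #include <bitset>
    #include <cstdint>

    constexpr int kBlockBytes = 64;
    struct DataBlk { uint8_t bytes[kBlockBytes]; };

    // Copy only the bytes enabled in 'mask' from src into dst, leaving the
    // destination's other bytes untouched (copyPartial-style merge).
    void copyPartial(DataBlk &dst, const DataBlk &src,
                     const std::bitset<kBlockBytes> &mask)
    {
        for (int i = 0; i < kBlockBytes; ++i)
            if (mask[i])
                dst.bytes[i] = src.bytes[i];
    }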
-
- action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
- peek(memQueue_in, MemoryMsg) {
- if (tbe.wtData == true) {
- // DO Nothing (already have the directory data)
- } else if (tbe.Dirty == false) {
- tbe.DataBlk := getDirectoryEntry(address).DataBlk;
- }
- tbe.MemData := true;
- }
- }
-
- action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Dirty) {
- DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
- DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
- if (tbe.wtData) {
- DataBlock tmp := in_msg.DataBlk;
- tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
- tbe.DataBlk := tmp;
- } else if (tbe.Dirty) {
- if(tbe.atomicData == false && tbe.wtData == false) {
- DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
- assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
- }
- } else {
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- tbe.LastSender := in_msg.Sender;
- }
- }
- if (in_msg.Hit) {
- tbe.Cached := true;
- }
- }
- }
-
- action(mwc_markSinkWriteCancel, "mwc", desc="Mark to sink impending VicDirty") {
- peek(responseNetwork_in, ResponseMsg) {
- DPRINTF(RubySlicc, "Write cancel bit set on address %s\n", address);
- getDirectoryEntry(address).VicDirtyIgnore.add(in_msg.Sender);
- APPEND_TRANSITION_COMMENT(" setting bit to sink VicDirty ");
- }
- }
-
- action(x_decrementAcks, "x", desc="decrement Acks pending") {
- tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
- APPEND_TRANSITION_COMMENT(" Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(o_checkForCompletion, "o", desc="check for ack completion") {
- if (tbe.NumPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:AcksComplete;
- }
- }
- APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
- APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
- }
-
- action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
- peek(requestNetwork_in, CPURequestMsg) {
- getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
- }
- }
-
- action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
- peek(responseNetwork_in, ResponseMsg) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := in_msg.DataBlk;
- entry.LastSender := in_msg.Sender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := in_msg.DataBlk;
-
- entry.LastSender := in_msg.Sender;
- }
- }
- }
-
- action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
- if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
- if (L3CacheMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- } else {
- if (L3CacheMemory.cacheAvail(address) == false) {
- Addr victim := L3CacheMemory.cacheProbe(address);
- CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
- L3CacheMemory.lookup(victim));
- queueMemoryWrite(machineID, victim, to_memory_controller_latency,
- victim_entry.DataBlk);
- L3CacheMemory.deallocate(victim);
- }
- assert(L3CacheMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
- entry.DataBlk := tbe.DataBlk;
- entry.LastSender := tbe.LastSender;
- }
- }
- }
-
- action(apf_allocateProbeFilterEntry, "apf", desc="Allocate probe filter entry") {
- if (!ProbeFilterMemory.isTagPresent(address)) {
- if (inclusiveDir) {
- assert(ProbeFilterMemory.cacheAvail(address));
- } else if (ProbeFilterMemory.cacheAvail(address) == false) {
- Addr victim := ProbeFilterMemory.cacheProbe(address);
- ProbeFilterMemory.deallocate(victim);
- }
- assert(ProbeFilterMemory.cacheAvail(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.allocate(address, new CacheEntry));
- APPEND_TRANSITION_COMMENT(" allocating a new probe filter entry");
- entry.pfState := ProbeFilterState:NT;
- if (inclusiveDir) {
- entry.pfState := ProbeFilterState:T;
- }
- entry.isOnCPU := false;
- entry.isOnGPU := false;
- }
- }
-
- action(mpfe_markPFEntryForEviction, "mpfe", desc="Mark this PF entry is being evicted") {
- assert(ProbeFilterMemory.isTagPresent(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
- entry.pfState := ProbeFilterState:B;
- peek(requestNetwork_in, CPURequestMsg) {
- tbe.demandAddress := in_msg.addr;
- }
- }
-
- action(we_wakeUpEvictionDependents, "we", desc="Wake up requests waiting for demand address and victim address") {
- wakeUpBuffers(address);
- wakeUpBuffers(tbe.demandAddress);
- }
-
- action(dpf_deallocateProbeFilter, "dpf", desc="deallocate PF entry") {
- assert(ProbeFilterMemory.isTagPresent(address));
- ProbeFilterMemory.deallocate(address);
- }
-
- action(upf_updateProbeFilter, "upf", desc="update probe filter on a new request") {
- peek(requestNetwork_in, CPURequestMsg) {
- assert(ProbeFilterMemory.isTagPresent(address));
- CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
- if (in_msg.Type == CoherenceRequestType:WriteThrough ||
- in_msg.Type == CoherenceRequestType:Atomic ||
- in_msg.Type == CoherenceRequestType:RdBlkM ||
- in_msg.Type == CoherenceRequestType:CtoD) {
- // Requests for exclusive access reset both sharer bits before the
- // requestor is recorded below
- entry.pfState := ProbeFilterState:T;
- entry.isOnCPU := false;
- entry.isOnGPU := false;
- }
- if(machineIDToMachineType(in_msg.Requestor) == MachineType:CorePair) {
- entry.isOnCPU := true;
- } else {
- entry.isOnGPU := true;
- }
- }
- }
-
- action(rmcd_removeSharerConditional, "rmcd", desc="remove sharer from probe Filter, conditional") {
- peek(requestNetwork_in, CPURequestMsg) {
- if (ProbeFilterMemory.isTagPresent(address)) {
- CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
- if(machineIDToMachineType(in_msg.Requestor) == MachineType:CorePair) {//CorePair has inclusive L2
- if (in_msg.Type == CoherenceRequestType:VicDirty) {
- entry.isOnCPU := false;
- } else if (in_msg.Type == CoherenceRequestType:VicClean) {
- entry.isOnCPU := false;
- }
- }
- }
- }
- }
-
- action(sf_setForwardReqTime, "sf", desc="...") {
- tbe.ForwardRequestTime := curCycle();
- }
-
- action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
- L3CacheMemory.deallocate(address);
- }
-
- action(p_popRequestQueue, "p", desc="pop request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(pr_popResponseQueue, "pr", desc="pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(pm_popMemQueue, "pm", desc="pop mem queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
- L3TriggerQueue_in.dequeue(clockEdge());
- }
-
- action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(zz_recycleRequestQueue, "zz", desc="recycle request queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
- responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
- stall_and_wait(requestNetwork_in, address);
- }
-
- action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
- wakeUpBuffers(address);
- }
-
- action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
- wakeUpAllBuffers();
- }
-
- action(z_stall, "z", desc="...") {
- }
-
- // TRANSITIONS
- transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, {RdBlkS, RdBlkM, RdBlk, CtoD}) {
- st_stallAndWaitRequest;
- }
-
- // It may be possible to save multiple invalidations here!
- transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, {Atomic, WriteThrough}) {
- st_stallAndWaitRequest;
- }
-
-
- // transitions from U
- transition(U, PF_Repl, B_P) {PFTagArrayRead, PFTagArrayWrite}{
- te_allocateTBEForEviction;
- apf_allocateProbeFilterEntry;
- bp_backProbe;
- sm_setMRU;
- mpfe_markPFEntryForEviction;
- }
-
- transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
- t_allocateTBE;
- apf_allocateProbeFilterEntry;
- l_queueMemRdReq;
- sc_probeShrCoreData;
- sm_setMRU;
- upf_updateProbeFilter;
- p_popRequestQueue;
- }
-
- transition(U, WriteThrough, BM_PM) {L3TagArrayRead, L3TagArrayWrite, PFTagArrayRead, PFTagArrayWrite} {
- t_allocateTBE;
- apf_allocateProbeFilterEntry;
- w_sendResponseWBAck;
- l_queueMemRdReq;
- dc_probeInvCoreData;
- sm_setMRU;
- upf_updateProbeFilter;
- p_popRequestQueue;
- }
-
- transition(U, Atomic, BM_PM) {L3TagArrayRead, L3TagArrayWrite, PFTagArrayRead, PFTagArrayWrite} {
- t_allocateTBE;
- apf_allocateProbeFilterEntry;
- l_queueMemRdReq;
- dc_probeInvCoreData;
- sm_setMRU;
- upf_updateProbeFilter;
- p_popRequestQueue;
- }
-
- transition(U, {RdBlkM}, BM_PM) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
- t_allocateTBE;
- apf_allocateProbeFilterEntry;
- l_queueMemRdReq;
- dc_probeInvCoreData;
- sm_setMRU;
- upf_updateProbeFilter;
- p_popRequestQueue;
- }
-
- transition(U, RdBlk, B_PM) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite}{
- t_allocateTBE;
- apf_allocateProbeFilterEntry;
- l_queueMemRdReq;
- sc_probeShrCoreData;
- sm_setMRU;
- upf_updateProbeFilter;
- p_popRequestQueue;
- }
-
- transition(U, CtoD, BP) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
- t_allocateTBE;
- apf_allocateProbeFilterEntry;
- ic_probeInvCore;
- sm_setMRU;
- upf_updateProbeFilter;
- p_popRequestQueue;
- }
-
- transition(U, VicDirty, BL) {L3TagArrayRead} {
- t_allocateTBE;
- w_sendResponseWBAck;
- rmcd_removeSharerConditional;
- p_popRequestQueue;
- }
-
- transition(U, VicClean, BL) {L3TagArrayRead} {
- t_allocateTBE;
- w_sendResponseWBAck;
- rmcd_removeSharerConditional;
- p_popRequestQueue;
- }
-
- transition(BL, {VicDirty, VicClean}) {
- zz_recycleRequestQueue;
- }
-
- transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
- d_writeDataToMemory;
- al_allocateL3Block;
- wa_wakeUpDependents;
- dt_deallocateTBE;
- //l_queueMemWBReq; // why need an ack? esp. with DRAMSim, just put it in queue no ack needed
- pr_popResponseQueue;
- }
-
- transition(BL, StaleWB, U) {L3TagArrayWrite} {
- dt_deallocateTBE;
- wa_wakeUpAllDependents;
- pr_popResponseQueue;
- }
-
- transition({B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P}, {VicDirty, VicClean}) {
- z_stall;
- }
-
- transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, WBAck) {
- pm_popMemQueue;
- }
-
- transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, PF_Repl) {
- zz_recycleRequestQueue;
- }
-
- transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, StaleVicDirty) {
- rv_removeVicDirtyIgnore;
- w_sendResponseWBAck;
- p_popRequestQueue;
- }
-
- transition({B}, CoreUnblock, U) {
- wa_wakeUpDependents;
- pu_popUnblockQueue;
- }
-
- transition(B, UnblockWriteThrough, U) {
- wa_wakeUpDependents;
- pt_popTriggerQueue;
- }
-
- transition(BS_PM, MemData, BS_Pm) {} {
- mt_writeMemDataToTBE;
- pm_popMemQueue;
- }
-
- transition(BM_PM, MemData, BM_Pm){} {
- mt_writeMemDataToTBE;
- pm_popMemQueue;
- }
-
- transition(B_PM, MemData, B_Pm){} {
- mt_writeMemDataToTBE;
- pm_popMemQueue;
- }
-
- transition(BS_PM, L3Hit, BS_Pm) {} {
- ptl_popTriggerQueue;
- }
-
- transition(BM_PM, L3Hit, BM_Pm) {} {
- ptl_popTriggerQueue;
- }
-
- transition(B_PM, L3Hit, B_Pm) {} {
- ptl_popTriggerQueue;
- }
-
- transition(BS_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
- mt_writeMemDataToTBE;
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pm_popMemQueue;
- }
-
- transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(BM_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition(B_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- ptl_popTriggerQueue;
- }
-
- transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, BP}, CPUPrbResp) {
- y_writeProbeDataToTBE;
- x_decrementAcks;
- o_checkForCompletion;
- pr_popResponseQueue;
- }
-
- transition(BS_PM, ProbeAcksComplete, BS_M) {} {
- sf_setForwardReqTime;
- pt_popTriggerQueue;
- }
-
- transition(BM_PM, ProbeAcksComplete, BM_M) {} {
- sf_setForwardReqTime;
- pt_popTriggerQueue;
- }
-
- transition(B_PM, ProbeAcksComplete, B_M){} {
- sf_setForwardReqTime;
- pt_popTriggerQueue;
- }
-
- transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- s_sendResponseS;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- m_sendResponseM;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- es_sendResponseES;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(B_P, ProbeAcksComplete, U) {
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- we_wakeUpEvictionDependents;
- dpf_deallocateProbeFilter;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-
- transition(BP, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} {
- sf_setForwardReqTime;
- c_sendResponseCtoD;
- wd_writeBackData;
- alwt_allocateL3BlockOnWT;
- dt_deallocateTBE;
- pt_popTriggerQueue;
- }
-}
+++ /dev/null
-protocol "MOESI_AMD_Base";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_AMD_Base-msg.sm";
-include "MOESI_AMD_Base-CorePair.sm";
-include "MOESI_AMD_Base-L3cache.sm";
-include "MOESI_AMD_Base-dir.sm";
+++ /dev/null
-/*
- * Copyright (c) 2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L1Cache, "L1 cache protocol")
- : Sequencer * sequencer;
- CacheMemory * L1Icache;
- CacheMemory * L1Dcache;
- Cycles request_latency := 1;
- Cycles response_latency := 1;
- Cycles use_timeout_latency := 50;
- bool send_evictions;
-
- // Message Queues
- // From this node's L1 cache TO the network
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
- vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
- vnet_type="response";
-
- // To this node's L1 cache FROM the network
- // a L2 bank -> this L1
- MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
- vnet_type="request";
- // a L2 bank -> this L1
- MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
- vnet_type="response";
-
- MessageBuffer * triggerQueue;
-
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- I, AccessPermission:Invalid, desc="Idle";
- S, AccessPermission:Read_Only, desc="Shared";
- O, AccessPermission:Read_Only, desc="Owned";
- M, AccessPermission:Read_Only, desc="Modified (dirty)";
- M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
- MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
- MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
-
- // Transient States
- IM, AccessPermission:Busy, "IM", desc="Issued GetX";
- SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
- OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
- IS, AccessPermission:Busy, "IS", desc="Issued GetS";
- SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
- OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
- MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
- II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- Load, desc="Load request from the processor";
- Ifetch, desc="I-fetch request from the processor";
- Store, desc="Store request from the processor";
- L1_Replacement, desc="Replacement";
-
- // Requests
- Own_GETX, desc="We observe our own GetX forwarded back to us";
- Fwd_GETX, desc="A GetX from another processor";
- Fwd_GETS, desc="A GetS from another processor";
-    Fwd_DMA, desc="A request from DMA";
- Inv, desc="Invalidations from the directory";
-
- // Responses
- Ack, desc="Received an ack message";
- Data, desc="Received a data message, responder has a shared copy";
-    Exclusive_Data, desc="Received an exclusive data message, responder kept no copy";
-
- Writeback_Ack, desc="Writeback O.K. from directory";
-    Writeback_Ack_Data, desc="Writeback O.K. from directory, data must be sent";
- Writeback_Nack, desc="Writeback not O.K. from directory";
-
- // Triggers
- All_acks, desc="Received all required data and message acks";
-
- // Timeouts
- Use_Timeout, desc="lockout period ended";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="data for the block";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
- }
-
- structure(TBETable, external ="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
- TimerTable useTimerTable;
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
- if(is_valid(L1Dcache_entry)) {
- return L1Dcache_entry;
- }
-
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
- return L1Icache_entry;
- }
-
- Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L1Dcache.lookup(addr));
- }
-
- Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L1Icache.lookup(addr));
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- // L1 hit latency
- Cycles mandatoryQueueLatency(RubyRequestType type) {
- if (type == RubyRequestType:IFETCH) {
- return L1Icache.getTagLatency();
- } else {
- return L1Dcache.getTagLatency();
- }
- }
-
- // Latency for responses that fetch data from cache
- Cycles cacheResponseLatency() {
- if (L1Dcache.getTagLatency() > response_latency) {
- return L1Dcache.getTagLatency();
- } else {
- return response_latency;
- }
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
-
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
- ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
- ((cache_entry.CacheState != State:S) && (state == State:S)) ||
- ((cache_entry.CacheState != State:O) && (state == State:O)) ) {
-
- cache_entry.CacheState := state;
- sequencer.checkCoherence(addr);
- }
- else {
- cache_entry.CacheState := state;
- }
- }
- }
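-
-  // The sequencer coherence check fires only when a block newly enters one
-  // of the stable S/O/M/MM states; updates that stay in the same state (or
-  // move between transient states) skip it.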
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
- return L1Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
- return L1Cache_State_to_permission(cache_entry.CacheState);
- }
-
- DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L1Cache_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- testAndRead(addr, cache_entry.DataBlk, pkt);
- } else {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- error("Data block missing!");
- }
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, cache_entry.DataBlk, pkt);
- return num_functional_writes;
- }
-
- TBE tbe := TBEs[addr];
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
- return Event:Store;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- // ** OUT_PORTS **
-
- out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
- out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- // ** IN_PORTS **
-
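-  // In_ports are serviced in rank order (highest first), so timer and
-  // trigger events take priority over network responses, which in turn
-  // beat forwarded requests and new processor requests from the
-  // mandatory queue.
-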
- // Use Timer
- in_port(useTimerTable_in, Addr, useTimerTable, rank=4) {
- if (useTimerTable_in.isReady(clockEdge())) {
- Addr readyAddress := useTimerTable.nextAddress();
- trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
- TBEs.lookup(readyAddress));
- }
- }
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Response Network
- in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache, rank=2) {
- if (responseToL1Cache_in.isReady(clockEdge())) {
- peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
- if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Exclusive_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK_DATA) {
- trigger(Event:Writeback_Ack_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:WB_NACK) {
- trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
-
- // Request Network
- in_port(requestNetwork_in, RequestMsg, requestToL1Cache, rank=1) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg, block_on="addr") {
- assert(in_msg.Destination.isElement(machineID));
- DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
-
- if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
- if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
- trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
- trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:INV) {
- trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
-  // Mandatory queue between the node's CPU and its L1 caches
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, rank=0) {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
-        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
-
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
-          // The tag is present in the L1I, so service the I-fetch with this entry.
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Icache_entry,
- TBEs[in_msg.LineAddress]);
- } else {
-
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- // Check to see if it is in the OTHER L1
- if (is_valid(L1Dcache_entry)) {
-            // The block is in the wrong L1; replace it there so this L1 can fetch it
- trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
- }
- if (L1Icache.cacheAvail(in_msg.LineAddress)) {
-            // The L1 doesn't have the line, but there is room for it, so see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Icache_entry,
- TBEs[in_msg.LineAddress]);
- } else {
-            // No room in the L1, so make room by evicting a victim.
-            // First check that the victim line is not locked.
- Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
- trigger(Event:L1_Replacement,
- addr,
- getL1ICacheEntry(addr),
- TBEs[addr]);
- }
- }
- } else {
- // *** DATA ACCESS ***
-
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
-          // The tag is present in the L1D, so service the access with this entry
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
- } else {
-
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- // Check to see if it is in the OTHER L1
- if (is_valid(L1Icache_entry)) {
-            // The block is in the wrong L1; replace it there so this L1 can fetch it
- trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
- }
- if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
-            // The L1 doesn't have the line, but there is room for it, so see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
- } else {
-            // No room in the L1, so make room by evicting a victim.
-            // First check that the victim line is not locked.
- Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, addr);
- trigger(Event:L1_Replacement,
- addr,
- getL1DCacheEntry(addr),
- TBEs[addr]);
- }
- }
- }
- }
- }
- }
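-
-  // A block may live in at most one of the two L1s: a request that finds
-  // its line in the other L1 first triggers an L1_Replacement there, and a
-  // full victim set likewise forces a replacement before the miss can issue.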
-
-
- // ACTIONS
-
- action(a_issueGETS, "a", desc="Issue GETS") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.AccessMode := in_msg.AccessMode;
- out_msg.Prefetch := in_msg.Prefetch;
- }
- }
- }
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- peek(mandatoryQueue_in, RubyRequest) {
- enqueue(requestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.AccessMode := in_msg.AccessMode;
- out_msg.Prefetch := in_msg.Prefetch;
- }
- }
- }
-
- action(d_issuePUTX, "d", desc="Issue PUTX") {
- enqueue(requestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(dd_issuePUTO, "\d", desc="Issue PUTO") {
- enqueue(requestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTO;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
- enqueue(requestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTS;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(e_sendData, "e", desc="Send data from cache to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.DataBlk := cache_entry.DataBlk;
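-          // Ownership stays with this cache, so the forwarded copy is
-          // reported clean: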
- // out_msg.Dirty := cache_entry.Dirty;
- out_msg.Dirty := false;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- DPRINTF(RubySlicc, "Sending data to L2: %#x\n", in_msg.addr);
- }
- else {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
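-          // Ownership stays with this cache, so the forwarded copy is
-          // reported clean: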
- // out_msg.Dirty := cache_entry.Dirty;
- out_msg.Dirty := false;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- }
- DPRINTF(RubySlicc, "Sending data to L1\n");
- }
- }
- }
-
- action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
- }
- else {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- }
- DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
- }
- }
- }
-
- action(f_sendAck, "f", desc="Send ack from cache to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Acks := 0 - 1; // -1
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- else {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.Acks := 0 - 1; // -1
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
- }
-
- action(g_sendUnblock, "g", desc="Send unblock to memory") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
- action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk);
- cache_entry.Dirty := true;
- }
-
- action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.writeCallback(address, cache_entry.DataBlk, true);
- cache_entry.Dirty := true;
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE") {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- assert(is_valid(cache_entry));
- tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
- tbe.Dirty := cache_entry.Dirty;
- }
-
- action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
- useTimerTable.unset(address);
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
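-  // Ack accounting convention: data messages carry the (positive) number
-  // of acks the requestor must still collect, while each ACK carries -1.
-  // Subtracting in_msg.Acks from NumPendingMsgs therefore works regardless
-  // of arrival order: the count reaches zero exactly when the data and all
-  // expected acks have been received.
-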
- action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
- peek(responseToL1Cache_in, ResponseMsg) {
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
- }
- }
-
- action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
- }
- }
-
- action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToL1Cache_in.dequeue(clockEdge());
- }
-
- action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:ALL_ACKS;
- }
- }
- }
-
- action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
- useTimerTable.set(address,
- clockEdge() + cyclesToTicks(use_timeout_latency));
- }
-
-  action(ub_dmaUnblockL2Cache, "ub", desc="Send DMA ack to the L2 cache") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DMA_ACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.Dirty := false;
- out_msg.Acks := 1;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
- action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- if (in_msg.RequestorMachine == MachineType:L1Cache ||
- in_msg.RequestorMachine == MachineType:DMA) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
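-          // The in-flight writeback keeps responsibility for dirty data,
-          // so the forwarded copy is reported clean: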
- // out_msg.Dirty := tbe.Dirty;
- out_msg.Dirty := false;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- }
- }
- else {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.DataBlk := tbe.DataBlk;
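-          // The in-flight writeback keeps responsibility for dirty data,
-          // so the forwarded copy is reported clean: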
- // out_msg.Dirty := tbe.Dirty;
- out_msg.Dirty := false;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
- }
-
-  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- }
- }
- else {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Acks := in_msg.Acks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
- }
-
- // L2 will usually request data for a writeback
- action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
- enqueue(requestNetwork_out, RequestMsg, request_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(mapAddressToMachine(address,
- MachineType:L2Cache));
- if (tbe.Dirty) {
- out_msg.Type := CoherenceRequestType:WRITEBACK_DIRTY_DATA;
- } else {
- out_msg.Type := CoherenceRequestType:WRITEBACK_CLEAN_DATA;
- }
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(u_writeDataToCache, "u", desc="Write data to cache") {
- peek(responseToL1Cache_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
-
-      if (in_msg.Type == CoherenceResponseType:DATA) {
-        // Shared DATA responses are expected to be clean; the check is
-        // left disabled.
-        //assert(in_msg.Dirty == false);
-      }
- }
- }
-
- action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
- if (L1Dcache.isTagPresent(address)) {
- L1Dcache.deallocate(address);
- } else {
- L1Icache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
- if ((is_invalid(cache_entry))) {
- set_cache_entry(L1Dcache.allocate(address, new Entry));
- }
- }
-
- action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
- if ((is_invalid(cache_entry))) {
- set_cache_entry(L1Icache.allocate(address, new Entry));
- }
- }
-
- action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
- ++L1Icache.demand_misses;
- }
-
- action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
- ++L1Icache.demand_hits;
- }
-
- action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
- ++L1Dcache.demand_misses;
- }
-
- action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
- ++L1Dcache.demand_hits;
- }
-
-  action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
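-  // Recycling re-enqueues the head message after recycle_latency rather
-  // than stalling the port, so other addresses keep making progress while
-  // this block sits in a transient state that cannot accept the request.
-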
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // Transitions for Load/Store/L2_Replacement from transient states
- transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
- zz_recycleMandatoryQueue;
- }
-
- transition({M_W, MM_W}, L1_Replacement) {
- zz_recycleMandatoryQueue;
- }
-
- transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
- z_recycleRequestQueue;
- }
-
- transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
- zz_recycleMandatoryQueue;
- }
-
- // Transitions from Idle
- transition(I, Load, IS) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, Ifetch, IS) {
- jj_allocateL1ICacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileInstMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, Store, IM) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- b_issueGETX;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, L1_Replacement) {
- kk_deallocateL1CacheBlock;
- }
-
- transition(I, Inv) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
- h_load_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- k_popMandatoryQueue;
- }
-
- // Transitions from Shared
- transition(S, Store, SM) {
- i_allocateTBE;
- b_issueGETX;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(S, L1_Replacement, SI) {
- i_allocateTBE;
- dd_issuePUTS;
- forward_eviction_to_cpu;
- kk_deallocateL1CacheBlock;
- }
-
- transition(S, Inv, I) {
- f_sendAck;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(S, Fwd_GETS) {
- e_sendData;
- l_popForwardQueue;
- }
-
- transition(S, Fwd_DMA) {
- e_sendData;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- // Transitions from Owned
- transition(O, Store, OM) {
- i_allocateTBE;
- b_issueGETX;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(O, L1_Replacement, OI) {
- i_allocateTBE;
- dd_issuePUTO;
- forward_eviction_to_cpu;
- kk_deallocateL1CacheBlock;
- }
-
- transition(O, Fwd_GETX, I) {
- ee_sendDataExclusive;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(O, Fwd_GETS) {
- e_sendData;
- l_popForwardQueue;
- }
-
- transition(O, Fwd_DMA) {
- e_sendData;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- // Transitions from MM
- transition({MM, MM_W}, Store) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(MM, L1_Replacement, MI) {
- i_allocateTBE;
- d_issuePUTX;
- forward_eviction_to_cpu;
- kk_deallocateL1CacheBlock;
- }
-
- transition(MM, Fwd_GETX, I) {
- ee_sendDataExclusive;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(MM, Fwd_GETS, I) {
- ee_sendDataExclusive;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(MM, Fwd_DMA, MM) {
- e_sendData;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- // Transitions from M
- transition(M, Store, MM) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M_W, Store, MM_W) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M, L1_Replacement, MI) {
- i_allocateTBE;
- d_issuePUTX;
- forward_eviction_to_cpu;
- kk_deallocateL1CacheBlock;
- }
-
- transition(M, Fwd_GETX, I) {
- // e_sendData;
- ee_sendDataExclusive;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(M, Fwd_GETS, O) {
- e_sendData;
- l_popForwardQueue;
- }
-
- transition(M, Fwd_DMA) {
- e_sendData;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- // Transitions from IM
-
- transition(IM, Inv) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition(IM, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(IM, {Exclusive_Data, Data}, OM) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- // Transitions from SM
- transition(SM, Inv, IM) {
- f_sendAck;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(SM, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(SM, {Data, Exclusive_Data}, OM) {
- // v_writeDataToCacheVerify;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(SM, Fwd_GETS) {
- e_sendData;
- l_popForwardQueue;
- }
-
- transition(SM, Fwd_DMA) {
- e_sendData;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- // Transitions from OM
- transition(OM, Own_GETX) {
- mm_decrementNumberOfMessages;
- o_checkForCompletion;
- l_popForwardQueue;
- }
-
-
- // transition(OM, Fwd_GETX, OMF) {
- transition(OM, Fwd_GETX, IM) {
- ee_sendDataExclusive;
- l_popForwardQueue;
- }
-
- transition(OM, Fwd_GETS) {
- e_sendData;
- l_popForwardQueue;
- }
-
- transition(OM, Fwd_DMA) {
- e_sendData;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- //transition({OM, OMF}, Ack) {
- transition(OM, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(OM, All_acks, MM_W) {
- xx_store_hit;
- gg_sendUnblockExclusive;
- s_deallocateTBE;
- o_scheduleUseTimeout;
- j_popTriggerQueue;
- }
-
- transition(MM_W, Use_Timeout, MM) {
- jj_unsetUseTimer;
- }
-
- // Transitions from IS
-
- transition(IS, Inv) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition(IS, Data, S) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- hx_load_hit;
- g_sendUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition(IS, Exclusive_Data, M_W) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- hx_load_hit;
- gg_sendUnblockExclusive;
- o_scheduleUseTimeout;
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition(M_W, Use_Timeout, M) {
- jj_unsetUseTimer;
- }
-
- // Transitions from OI/MI
-
- transition(MI, Fwd_GETS, OI) {
- q_sendDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition(MI, Fwd_DMA) {
- q_sendDataFromTBEToCache;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- transition(MI, Fwd_GETX, II) {
- q_sendExclusiveDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition({SI, OI}, Fwd_GETS) {
- q_sendDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition({SI, OI}, Fwd_DMA) {
- q_sendDataFromTBEToCache;
- ub_dmaUnblockL2Cache;
- l_popForwardQueue;
- }
-
- transition(OI, Fwd_GETX, II) {
- q_sendExclusiveDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition({SI, OI, MI}, Writeback_Ack_Data, I) {
- qq_sendWBDataFromTBEToL2; // always send data
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition({SI, OI, MI}, Writeback_Ack, I) {
- g_sendUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition({MI, OI}, Writeback_Nack, OI) {
- // FIXME: This might cause deadlock by re-using the writeback
- // channel, we should handle this case differently.
- dd_issuePUTO;
- n_popResponseQueue;
- }
-
- // Transitions from II
- transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
- g_sendUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- // transition({II, SI}, Writeback_Nack, I) {
- transition(II, Writeback_Nack, I) {
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition(SI, Writeback_Nack) {
- dd_issuePUTS;
- n_popResponseQueue;
- }
-
- transition(II, Inv) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition(SI, Inv, II) {
- f_sendAck;
- l_popForwardQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L2Cache, "L2 cache protocol")
-: CacheMemory * L2cache;
- Cycles response_latency := 1;
- Cycles request_latency := 1;
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
- MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="0",
- vnet_type="request"; // this L2 bank -> a local L1
- MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="1",
- vnet_type="request"; // this L2 bank -> mod-directory
- MessageBuffer * responseFromL2Cache, network="To", virtual_network="2",
- vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
-
- // FROM the network to this local bank of L2 cache
- MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
- vnet_type="request"; // a local L1 -> this L2 bank, Lets try this???
- MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="1",
- vnet_type="request"; // mod-directory -> this L2 bank
- MessageBuffer * responseToL2Cache, network="From", virtual_network="2",
- vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
-
- MessageBuffer * triggerQueue;
-{
- // STATES
- state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
-
- // Stable states
- NP, AccessPermission:Invalid, desc="Not Present";
- I, AccessPermission:Invalid, desc="Invalid";
- ILS, AccessPermission:Invalid, desc="Idle/NP, but local sharers exist";
- ILX, AccessPermission:Invalid, desc="Idle/NP, but local exclusive exists";
- ILO, AccessPermission:Invalid, desc="Idle/NP, but local owner exists";
- ILOX, AccessPermission:Invalid, desc="Idle/NP, but local owner exists and chip is exclusive";
- ILOS, AccessPermission:Invalid, desc="Idle/NP, but local owner exists and local sharers as well";
- ILOSX, AccessPermission:Invalid, desc="Idle/NP, but local owner exists, local sharers exist, chip is exclusive ";
- S, AccessPermission:Read_Only, desc="Shared, no local sharers";
- O, AccessPermission:Read_Only, desc="Owned, no local sharers";
- OLS, AccessPermission:Read_Only, desc="Owned with local sharers";
- OLSX, AccessPermission:Read_Only, desc="Owned with local sharers, chip is exclusive";
- SLS, AccessPermission:Read_Only, desc="Shared with local sharers";
- M, AccessPermission:Read_Write, desc="Modified";
-
- // Transient States
-
- IFGX, AccessPermission:Busy, desc="Blocked, forwarded global GETX to local owner/exclusive. No other on-chip invs needed";
- IFGS, AccessPermission:Busy, desc="Blocked, forwarded global GETS to local owner";
- ISFGS, AccessPermission:Busy, desc="Blocked, forwarded global GETS to local owner, local sharers exist";
- IFGXX, AccessPermission:Busy, desc="Blocked, forwarded global GETX to local owner but may need acks from other sharers";
- OLSF, AccessPermission:Busy, desc="Blocked, got Fwd_GETX with local sharers, waiting for local inv acks";
-
- // writebacks
- ILOW, AccessPermission:Busy, desc="local WB request, was ILO";
- ILOXW, AccessPermission:Busy, desc="local WB request, was ILOX";
- ILOSW, AccessPermission:Busy, desc="local WB request, was ILOS";
- ILOSXW, AccessPermission:Busy, desc="local WB request, was ILOSX";
- SLSW, AccessPermission:Busy, desc="local WB request, was SLS";
- OLSW, AccessPermission:Busy, desc="local WB request, was OLS";
- ILSW, AccessPermission:Busy, desc="local WB request, was ILS";
- IW, AccessPermission:Busy, desc="local WB request from only sharer, was ILS";
- OW, AccessPermission:Busy, desc="local WB request from only sharer, was OLS";
- SW, AccessPermission:Busy, desc="local WB request from only sharer, was SLS";
- OXW, AccessPermission:Busy, desc="local WB request from only sharer, was OLSX";
- OLSXW, AccessPermission:Busy, desc="local WB request from sharer, was OLSX";
- ILXW, AccessPermission:Busy, desc="local WB request, was ILX";
-
- IFLS, AccessPermission:Busy, desc="Blocked, forwarded local GETS to _some_ local sharer";
- IFLO, AccessPermission:Busy, desc="Blocked, forwarded local GETS to local owner";
- IFLOX, AccessPermission:Busy, desc="Blocked, forwarded local GETS to local owner but chip is exclusive";
- IFLOXX, AccessPermission:Busy, desc="Blocked, forwarded local GETX to local owner/exclusive, chip is exclusive";
- IFLOSX, AccessPermission:Busy, desc="Blocked, forwarded local GETS to local owner w/ other sharers, chip is exclusive";
- IFLXO, AccessPermission:Busy, desc="Blocked, forwarded local GETX to local owner with other sharers, chip is exclusive";
-
- IGS, AccessPermission:Busy, desc="Semi-blocked, issued local GETS to directory";
- IGM, AccessPermission:Busy, desc="Blocked, issued local GETX to directory. Need global acks and data";
- IGMLS, AccessPermission:Busy, desc="Blocked, issued local GETX to directory but may need to INV local sharers";
- IGMO, AccessPermission:Busy, desc="Blocked, have data for local GETX but need all acks";
- IGMIO, AccessPermission:Busy, desc="Blocked, issued local GETX, local owner with possible local sharer, may need to INV";
- OGMIO, AccessPermission:Busy, desc="Blocked, issued local GETX, was owner, may need to INV";
- IGMIOF, AccessPermission:Busy, desc="Blocked, issued local GETX, local owner, waiting for global acks, got Fwd_GETX";
- IGMIOFS, AccessPermission:Busy, desc="Blocked, issued local GETX, local owner, waiting for global acks, got Fwd_GETS";
- OGMIOF, AccessPermission:Busy, desc="Blocked, issued local GETX, was owner, waiting for global acks, got Fwd_GETX";
-
- II, AccessPermission:Busy, desc="Blocked, handling invalidations";
- MM, AccessPermission:Busy, desc="Blocked, was M satisfying local GETX";
- SS, AccessPermission:Busy, desc="Blocked, was S satisfying local GETS";
- OO, AccessPermission:Busy, desc="Blocked, was O satisfying local GETS";
- OLSS, AccessPermission:Busy, desc="Blocked, satisfying local GETS";
- OLSXS, AccessPermission:Busy, desc="Blocked, satisfying local GETS";
- SLSS, AccessPermission:Busy, desc="Blocked, satisfying local GETS";
-
- OI, AccessPermission:Busy, desc="Blocked, doing writeback, was O";
- MI, AccessPermission:Busy, desc="Blocked, doing writeback, was M";
- MII, AccessPermission:Busy, desc="Blocked, doing writeback, was M, got Fwd_GETX";
- OLSI, AccessPermission:Busy, desc="Blocked, doing writeback, was OLS";
- ILSI, AccessPermission:Busy, desc="Blocked, doing writeback, was OLS got Fwd_GETX";
-
- // DMA blocking states
- ILOSD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
- ILOSXD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
- ILOD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
- ILXD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
- ILOXD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
- }
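-
-  // Naming convention: the IL* states are invalid in this L2 bank itself,
-  // but local L1 copies remain (sharers in ILS/ILOS*, an owner in ILO*,
-  // an exclusive copy in ILX), so the bank keeps forwarding requests and
-  // invalidations on their behalf.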
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
-
- // Requests
- L1_GETS, desc="local L1 GETS request";
- L1_GETX, desc="local L1 GETX request";
- L1_PUTO, desc="local owner wants to writeback";
- L1_PUTX, desc="local exclusive wants to writeback";
- L1_PUTS_only, desc="only local sharer wants to writeback";
- L1_PUTS, desc="local sharer wants to writeback";
- Fwd_GETX, desc="A GetX from another processor";
- Fwd_GETS, desc="A GetS from another processor";
- Fwd_DMA, desc="A request from DMA";
- Own_GETX, desc="A GetX from this node";
- Inv, desc="Invalidations from the directory";
-
- // Responses
- IntAck, desc="Received an ack message";
- ExtAck, desc="Received an ack message";
- All_Acks, desc="Received all ack messages";
- Data, desc="Received a data message, responder has a shared copy";
- Data_Exclusive, desc="Received a data message";
-    L1_WBCLEANDATA, desc="Writeback of clean data from L1";
-    L1_WBDIRTYDATA, desc="Writeback of dirty data from L1";
-
- Writeback_Ack, desc="Writeback O.K. from directory";
- Writeback_Nack, desc="Writeback not O.K. from directory";
-
- Unblock, desc="Local L1 is telling L2 dir to unblock";
- Exclusive_Unblock, desc="Local L1 is telling L2 dir to unblock";
-
- DmaAck, desc="DMA ack from local L1";
- // events initiated by this L2
- L2_Replacement, desc="L2 Replacement", format="!r";
-
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- NetDest Sharers, desc="Set of the internal processors that want the block in shared state";
- MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response";
- bool OwnerValid, default="false", desc="true if Owner means something";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="data for the block";
- }
-
-
- structure(DirEntry, desc="...", interface="AbstractEntry") {
- NetDest Sharers, desc="Set of the internal processors that want the block in shared state";
- MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response";
- bool OwnerValid, default="false", desc="true if Owner means something";
- State DirState, desc="directory state";
- }
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- Addr PC, desc="Program counter of request";
- DataBlock DataBlk, desc="Buffer for the data block";
- bool Dirty, desc="Is the data dirty (different than memory)?";
-
-    int NumExtPendingAcks, default="0", desc="Number of external acks/data messages still expected";
-    int NumIntPendingAcks, default="0", desc="Number of internal (local L1) acks/data messages still expected";
- int Fwd_GETX_ExtAcks, default="0", desc="Number of acks that requestor will need";
- int Local_GETX_IntAcks, default="0", desc="Number of acks that requestor will need";
-
- NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
- MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
- NetDest Fwd_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
- MachineID Fwd_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
- }
-
- structure(TBETable, external = "yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- structure(PerfectCacheMemory, external = "yes") {
- void allocate(Addr);
- void deallocate(Addr);
- DirEntry lookup(Addr);
- bool isTagPresent(Addr);
- }
-
- TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
- PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
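-
-  // Sharer/owner bookkeeping lives in the cache entry while the block is
-  // resident in this bank; once the data is gone but L1 copies remain, the
-  // same fields move to localDirectory instead. The helpers below hide
-  // this split and assert that only one of the two holds state at a time.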
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
- void wakeUpAllBuffers(Addr a);
-
- // Latency for responses that fetch data from cache
- Cycles cacheResponseLatency() {
- if (L2cache.getTagLatency() > response_latency) {
- return L2cache.getTagLatency();
- }
- else {
- return response_latency;
- }
- }
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache[address]);
- }
-
- bool isDirTagPresent(Addr addr) {
- return (localDirectory.isTagPresent(addr) );
- }
-
- DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
- return localDirectory.lookup(address);
- }
-
- bool isOnlySharer(Entry cache_entry, Addr addr, MachineID shar_id) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- if (cache_entry.Sharers.count() > 1) {
- return false;
- }
- else if (cache_entry.Sharers.count() == 1) {
- if (cache_entry.Sharers.isElement(shar_id)) {
- return true;
- }
- else {
- return false; // something happened which should cause this PUTS to be nacked
- }
- }
- else {
- return false;
- }
- }
- else if (localDirectory.isTagPresent(addr)){
- DirEntry dir_entry := getDirEntry(addr);
- if (dir_entry.Sharers.count() > 1) {
- return false;
- }
- else if (dir_entry.Sharers.count() == 1) {
- if (dir_entry.Sharers.isElement(shar_id)) {
- return true;
- }
- else {
- return false; // something happened which should cause this PUTS to be nacked
- }
- }
- else {
- return false;
- }
- }
- else {
- // shouldn't happen unless L1 issues PUTS before unblock received
- return false;
- }
- }
-
- void copyCacheStateToDir(Entry cache_entry, Addr addr) {
- assert(localDirectory.isTagPresent(addr) == false);
- assert(is_valid(cache_entry));
- localDirectory.allocate(addr);
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.DirState := cache_entry.CacheState;
- dir_entry.Sharers := cache_entry.Sharers;
- dir_entry.Owner := cache_entry.Owner;
- dir_entry.OwnerValid := cache_entry.OwnerValid;
-
- }
-
- void copyDirToCache(Entry cache_entry, Addr addr) {
- assert(is_valid(cache_entry));
- DirEntry dir_entry := getDirEntry(addr);
- cache_entry.Sharers := dir_entry.Sharers;
- cache_entry.Owner := dir_entry.Owner;
- cache_entry.OwnerValid := dir_entry.OwnerValid;
- }
-
-
- void recordLocalSharerInDir(Entry cache_entry, Addr addr, MachineID shar_id) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- cache_entry.Sharers.add(shar_id);
- }
- else {
- if (localDirectory.isTagPresent(addr) == false) {
- localDirectory.allocate(addr);
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.clear();
- dir_entry.OwnerValid := false;
- }
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.add(shar_id);
- }
- }
-
- void recordNewLocalExclusiveInDir(Entry cache_entry, Addr addr, MachineID exc_id) {
-
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- cache_entry.Sharers.clear();
- cache_entry.OwnerValid := true;
- cache_entry.Owner := exc_id;
- }
- else {
- if (localDirectory.isTagPresent(addr) == false) {
- localDirectory.allocate(addr);
- }
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.clear();
- dir_entry.OwnerValid := true;
- dir_entry.Owner := exc_id;
- }
- }
-
- void removeAllLocalSharersFromDir(Entry cache_entry, Addr addr) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- cache_entry.Sharers.clear();
- cache_entry.OwnerValid := false;
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.clear();
- dir_entry.OwnerValid := false;
- }
- }
-
- void removeSharerFromDir(Entry cache_entry, Addr addr, MachineID sender) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- cache_entry.Sharers.remove(sender);
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.remove(sender);
- }
- }
-
- void removeOwnerFromDir(Entry cache_entry, Addr addr, MachineID sender) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- cache_entry.OwnerValid := false;
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.OwnerValid := false;
- }
- }
-
- bool isLocalSharer(Entry cache_entry, Addr addr, MachineID shar_id) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- return cache_entry.Sharers.isElement(shar_id);
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.Sharers.isElement(shar_id);
- }
- }
-
- NetDest getLocalSharers(Entry cache_entry, Addr addr) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- return cache_entry.Sharers;
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.Sharers;
- }
- }
-
- MachineID getLocalOwner(Entry cache_entry, Addr addr) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- return cache_entry.Owner;
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.Owner;
- }
- }
-
- int countLocalSharers(Entry cache_entry, Addr addr) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- return cache_entry.Sharers.count();
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.Sharers.count();
- }
- }
-
- bool isLocalOwnerValid(Entry cache_entry, Addr addr) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- return cache_entry.OwnerValid;
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.OwnerValid;
- }
- }
-
- int countLocalSharersExceptRequestor(Entry cache_entry, Addr addr, MachineID requestor) {
- if (is_valid(cache_entry)) {
- assert (localDirectory.isTagPresent(addr) == false);
- if (cache_entry.Sharers.isElement(requestor)) {
- return ( cache_entry.Sharers.count() - 1 );
- }
- else {
- return cache_entry.Sharers.count();
- }
- }
- else {
- DirEntry dir_entry := getDirEntry(addr);
- if (dir_entry.Sharers.isElement(requestor)) {
- return ( dir_entry.Sharers.count() - 1 );
- }
- else {
- return dir_entry.Sharers.count();
- }
- }
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
-
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- } else if (isDirTagPresent(addr)) {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.DirState;
- } else {
- return State:NP;
- }
- }
-
- std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
- return CoherenceRequestType_to_string(type);
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- assert((localDirectory.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
-
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
-    if (
-      (state == State:M) ||
-      (state == State:O) ||
-      (state == State:S) ||
-      (state == State:OLS) ||
-      (state == State:SLS) ||
-      (state == State:OLSX)
-    ) {
- assert(is_valid(cache_entry));
- }
- else if (
- (state == State:ILS) ||
- (state == State:ILX) ||
- (state == State:ILO) ||
- (state == State:ILOX) ||
- (state == State:ILOS) ||
- (state == State:ILOSX)
- ) {
- // assert(isCacheTagPresent(addr) == false);
- }
-
- if (is_valid(cache_entry)) {
- if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
- ((cache_entry.CacheState != State:S) && (state == State:S)) ||
- ((cache_entry.CacheState != State:O) && (state == State:O)) ) {
- cache_entry.CacheState := state;
- // disable Coherence Checker for now
- // sequencer.checkCoherence(addr);
- }
- else {
- cache_entry.CacheState := state;
- }
- }
- else if (localDirectory.isTagPresent(addr)) {
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.DirState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
- return L2Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(cache_entry.CacheState));
- return L2Cache_State_to_permission(cache_entry.CacheState);
- }
-
- DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L2Cache_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
- out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
- out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
-
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
-
- // ** IN_PORTS **
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_Acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Response Network
- in_port(responseNetwork_in, ResponseMsg, responseToL2Cache, rank=2) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceResponseType:ACK) {
- if (in_msg.SenderMachine == MachineType:L2Cache) {
- trigger(Event:ExtAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- }
- else {
- trigger(Event:IntAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Data_Exclusive, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- DPRINTF(RubySlicc, "Received Unblock from L1 addr: %x\n", in_msg.addr);
- trigger(Event:Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
- trigger(Event:Exclusive_Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
- trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:WB_NACK) {
- trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
- trigger(Event:DmaAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
-
- // Request Network
- in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache, rank=1) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
- if (in_msg.Requestor == machineID) {
- trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
- trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:INV) {
- trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache, rank=0) {
- if (L1requestNetwork_in.isReady(clockEdge())) {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:L1_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:L1_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:L1_PUTO, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:L1_PUTX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:PUTS) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
- trigger(Event:L1_PUTS_only, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
- }
- else {
- trigger(Event:L1_PUTS, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (is_invalid(cache_entry) &&
- L2cache.cacheAvail(in_msg.addr) == false) {
- trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs[L2cache.cacheProbe(in_msg.addr)]);
- }
- else {
- trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_CLEAN_DATA) {
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (is_invalid(cache_entry) &&
- L2cache.cacheAvail(in_msg.addr) == false) {
- trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs[L2cache.cacheProbe(in_msg.addr)]);
- }
- else {
- trigger(Event:L1_WBCLEANDATA, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
- }
- } else {
- error("Unexpected message");
- }
- }
- }
- }
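
The four in_port declarations above carry explicit ranks, from the trigger queue at rank=3 down to L1 requests at rank=0. As I understand SLICC scheduling (an assumption, not something this hunk states), higher-ranked ports are polled first on each wakeup, so completion triggers and responses drain before new L1 requests are admitted. A rough C++ sketch of that polling discipline, with an illustrative InPort type:

#include <algorithm>
#include <functional>
#include <vector>

struct InPort {
    int rank;                          // higher rank = higher priority (assumed)
    std::function<bool()> tryConsume;  // handles one message, true if it did
};

void wakeup(std::vector<InPort>& ports)
{
    // Poll ports from highest rank to lowest, mirroring rank=3 (trigger)
    // down to rank=0 (L1 requests) in the declarations above.
    std::stable_sort(ports.begin(), ports.end(),
                     [](const InPort& a, const InPort& b) { return a.rank > b.rank; });
    for (auto& port : ports)
        while (port.tryConsume()) { /* drain this port before lower ranks */ }
}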
-
-
- // ACTIONS
-
- action(a_issueGETS, "a", desc="issue local request globally") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
- }
-
- action(a_issueGETX, "\a", desc="issue local request globally") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- }
- }
- }
-
- action(b_issuePUTX, "b", desc="Issue PUTX") {
- enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTX;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(b_issuePUTO, "\b", desc="Issue PUTO") {
- enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTO;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- /* PUTO, but local sharers exist */
- action(b_issuePUTO_ls, "\bb", desc="Issue PUTO") {
- enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTO_SHARERS;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(c_sendDataFromTBEToL1GETS, "c", desc="Send data from TBE to L1 requestors in TBE") {
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.addNetDest(tbe.L1_GetS_IDs);
- out_msg.DataBlk := tbe.DataBlk;
- // out_msg.Dirty := tbe.Dirty;
- // shared data should be clean
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, tbe.DataBlk);
- }
-
- action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to L1 requestors in TBE") {
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(tbe.L1_GetX_ID);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Acks := tbe.Local_GETX_IntAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, tbe.DataBlk);
- }
-
- action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") {
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.addNetDest(tbe.L1_GetS_IDs);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
- action(c_sendDataFromTBEToFwdGETX, "cc", desc="Send data from TBE to external GETX") {
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(tbe.Fwd_GetX_ID);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
-
-  action(cd_sendDataFromTBEToFwdDma, "cd", desc="Send data from TBE to external DMA read") {
- assert(is_valid(tbe));
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- // out_msg.Dirty := tbe.Dirty;
- // shared data should be clean
- out_msg.Dirty := false;
- out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, tbe.DataBlk);
- }
-
-  action(c_sendDataFromTBEToFwdGETS, "ccc", desc="Send data from TBE to external GETS") {
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.addNetDest(tbe.Fwd_GetS_IDs);
- out_msg.DataBlk := tbe.DataBlk;
- // out_msg.Dirty := tbe.Dirty;
- // shared data should be clean
- out_msg.Dirty := false;
- out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, tbe.DataBlk);
- }
-
-  action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send exclusive data from TBE to external GETS") {
- assert(is_valid(tbe));
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.addNetDest(tbe.Fwd_GetS_IDs);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, tbe.DataBlk);
- }
-
- action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") {
- assert(is_valid(cache_entry));
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- // out_msg.Dirty := cache_entry.Dirty;
- // shared data should be clean
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- }
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- }
-
-  action(d_sendDataToL1GETX, "\d", desc="Send exclusive data and the ack count to the L1 requestor") {
- assert(is_valid(cache_entry));
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- out_msg.Acks := tbe.Local_GETX_IntAcks;
- }
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- }
-
- action(dd_sendDataToFwdGETX, "dd", desc="send data") {
- assert(is_valid(cache_entry));
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.Acks := in_msg.Acks;
- }
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- }
-
-
- action(dd_sendDataToFwdGETS, "\dd", desc="send data") {
- assert(is_valid(cache_entry));
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- // out_msg.Dirty := cache_entry.Dirty;
- // shared data should be clean
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- }
-
- action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") {
- assert(is_valid(cache_entry));
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
-  action(e_sendAck, "e", desc="Send ack to the forwarded GETX requestor") {
-    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
-      assert(is_valid(tbe));
-      out_msg.addr := address;
-      out_msg.Type := CoherenceResponseType:ACK;
-      out_msg.Sender := machineID;
-      out_msg.SenderMachine := MachineType:L2Cache;
-      out_msg.Destination.add(tbe.Fwd_GetX_ID);
-      // each ack counts as -1; SLICC has no negative literals
-      out_msg.Acks := 0 - 1;
-      out_msg.MessageSize := MessageSizeType:Response_Control;
-    }
-  }
-
-  action(e_sendAckToL1Requestor, "\e", desc="Send ack to the L1 requestor") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Acks := 0 - 1;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
-
-  action(e_sendAckToL1RequestorFromTBE, "eee", desc="Send ack to the L1 GETX requestor recorded in the TBE") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(tbe.L1_GetX_ID);
- out_msg.Acks := 0 - 1;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(ee_sendLocalInv, "\ee", desc="Send local invalidates") {
- assert(is_valid(tbe));
- tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
- DPRINTF(RubySlicc, "Address: %#x, Local Sharers: %s, Pending Acks: %d\n",
- address, getLocalSharers(cache_entry, address),
- tbe.NumIntPendingAcks);
- if (isLocalOwnerValid(cache_entry, address)) {
- tbe.NumIntPendingAcks := tbe.NumIntPendingAcks + 1;
- DPRINTF(RubySlicc, "%s\n", getLocalOwner(cache_entry, address));
- }
-
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
- if (isLocalOwnerValid(cache_entry, address))
- {
- out_msg.Destination.add(getLocalOwner(cache_entry, address));
- }
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
-
- action(ee_sendLocalInvSharersOnly, "\eee", desc="Send local invalidates to sharers if they exist") {
-
- // assert(countLocalSharers(address) > 0);
- assert(is_valid(tbe));
- tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
-
- if (countLocalSharers(cache_entry, address) > 0) {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
- }
-
- action(ee_addLocalIntAck, "e\ee", desc="add a local ack to wait for") {
- assert(is_valid(tbe));
- tbe.NumIntPendingAcks := tbe.NumIntPendingAcks + 1;
- }
-
- action(ee_issueLocalInvExceptL1Requestor, "\eeee", desc="Send local invalidates to sharers if they exist") {
- peek(L1requestNetwork_in, RequestMsg) {
-      // assert(countLocalSharers(address) > 0);
- if (countLocalSharers(cache_entry, address) == 0) {
- tbe.NumIntPendingAcks := 0;
- }
- else {
-
- if (isLocalSharer(cache_entry, address, in_msg.Requestor)) {
- tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address) - 1;
- }
- else {
- tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
- }
-
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
- }
- }
-
- action(ee_issueLocalInvExceptL1RequestorInTBE, "\eeeeee", desc="Send local invalidates to sharers if they exist") {
- assert(is_valid(tbe));
- if (countLocalSharers(cache_entry, address) == 0) {
- tbe.NumIntPendingAcks := 0;
- }
- else {
- if (isLocalSharer(cache_entry, address, tbe.L1_GetX_ID)) {
- tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address) - 1;
- }
- else {
- tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
- }
- }
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := tbe.L1_GetX_ID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
- out_msg.Destination.remove(tbe.L1_GetX_ID);
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
-
-
- action(f_sendUnblock, "f", desc="Send unblock to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
-
- action(f_sendExclusiveUnblock, "\f", desc="Send unblock to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
-
- action(g_recordLocalSharer, "g", desc="Record new local sharer from unblock message") {
- peek(responseNetwork_in, ResponseMsg) {
- recordLocalSharerInDir(cache_entry, in_msg.addr, in_msg.Sender);
- }
- }
-
- action(g_recordLocalExclusive, "\g", desc="Record new local exclusive sharer from unblock message") {
- peek(responseNetwork_in, ResponseMsg) {
- recordNewLocalExclusiveInDir(cache_entry, address, in_msg.Sender);
- }
- }
-
- action(gg_clearLocalSharers, "gg", desc="Clear local sharers") {
- removeAllLocalSharersFromDir(cache_entry, address);
- }
-
- action(gg_clearSharerFromL1Response, "\gg", desc="Clear sharer from L1 response queue") {
- peek(responseNetwork_in, ResponseMsg) {
- removeSharerFromDir(cache_entry, in_msg.addr, in_msg.Sender);
- }
- }
-
- action(gg_clearSharerFromL1Request, "clsl1r", desc="Clear sharer from L1 request queue") {
- peek(L1requestNetwork_in, RequestMsg) {
- removeSharerFromDir(cache_entry, in_msg.addr, in_msg.Requestor);
- }
- }
-
- action(gg_clearOwnerFromL1Request, "clol1r", desc="Clear owner from L1 request queue") {
- peek(L1requestNetwork_in, RequestMsg) {
- removeOwnerFromDir(cache_entry, in_msg.addr, in_msg.Requestor);
- }
- }
-
- action(h_countLocalSharersExceptRequestor, "h", desc="counts number of acks needed for L1 GETX") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.Local_GETX_IntAcks := countLocalSharersExceptRequestor(cache_entry, address, in_msg.Requestor);
- }
- }
-
- action(h_clearIntAcks, "\h", desc="clear IntAcks") {
- assert(is_valid(tbe));
- tbe.Local_GETX_IntAcks := 0;
- }
-
- action(hh_countLocalSharersExceptL1GETXRequestorInTBE, "hh", desc="counts number of acks needed for L1 GETX") {
- assert(is_valid(tbe));
- tbe.Local_GETX_IntAcks := countLocalSharersExceptRequestor(cache_entry, address, tbe.L1_GetX_ID);
- }
-
- action(i_copyDataToTBE, "\i", desc="Copy data from response queue to TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- }
- }
-
-  action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request") {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- if(is_valid(cache_entry)) {
- tbe.DataBlk := cache_entry.DataBlk;
- tbe.Dirty := cache_entry.Dirty;
- }
- tbe.NumIntPendingAcks := 0; // default value
- tbe.NumExtPendingAcks := 0; // default value
- tbe.Fwd_GetS_IDs.clear();
- tbe.L1_GetS_IDs.clear();
- }
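
i_allocateTBE above shows the TBE's role as a per-address miss-status record: allocated when a transient state is entered, seeded from the cache entry when one exists, and freed by s_deallocateTBE once the transaction completes. A small C++ sketch of that lifecycle, with illustrative types rather than gem5's:

#include <cassert>
#include <cstdint>
#include <unordered_map>

using Addr = uint64_t;

struct DataBlock { uint8_t bytes[64] = {}; };

struct TBE {
    DataBlock dataBlk;
    bool dirty = false;
    int numIntPendingAcks = 0;  // acks still owed by local L1s
    int numExtPendingAcks = 0;  // acks still owed by remote nodes
};

std::unordered_map<Addr, TBE> tbes;  // one outstanding transaction per address

TBE& allocateTBE(Addr addr, const DataBlock* cached, bool cachedDirty)
{
    assert(tbes.count(addr) == 0);   // mirrors check_allocate(TBEs)
    TBE& tbe = tbes[addr];
    if (cached) {                    // seed from the cache entry if present
        tbe.dataBlk = *cached;
        tbe.dirty = cachedDirty;
    }
    return tbe;
}

void deallocateTBE(Addr addr) { tbes.erase(addr); }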
-
-
-
- action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- out_msg.Acks := 0 - 1;
- }
- }
- }
-
- action(jd_forwardDmaRequestToLocalOwner, "jd", desc="Forward dma request to local owner") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := in_msg.RequestorMachine;
- out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- out_msg.Acks := 0 - 1;
- }
- }
- }
-
-
- action(k_forwardLocalGETSToLocalSharer, "k", desc="Forward local request to local sharer/owner") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := MachineType:L1Cache;
- // should randomize this so one node doesn't get abused more than others
- DirEntry dir_entry := getDirEntry(in_msg.addr);
- out_msg.Destination.add(dir_entry.Sharers.smallestElement(MachineType:L1Cache));
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- }
- }
- }
-
- action(k_forwardLocalGETXToLocalOwner, "\k", desc="Forward local request to local owner") {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := tbe.L1_GetX_ID;
- out_msg.RequestorMachine := MachineType:L1Cache;
- DirEntry dir_entry := getDirEntry(address);
- out_msg.Destination.add(dir_entry.Owner);
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- out_msg.Acks := 1 + tbe.Local_GETX_IntAcks;
- }
- }
-
-  // same as the previous action, except that no TBE is needed to supply the ack count
- action(kk_forwardLocalGETXToLocalExclusive, "kk", desc="Forward local request to local owner") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- out_msg.Acks := 1;
- }
- }
- }
-
- action(kk_forwardLocalGETSToLocalOwner, "\kk", desc="Forward local request to local owner") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := MachineType:L1Cache;
- out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- }
- }
- }
-
-
- action(l_writebackAckNeedData, "l", desc="Send writeback ack to L1 requesting data") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue( responseNetwork_out, ResponseMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := CoherenceResponseType:WB_ACK_DATA;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(l_writebackAckDropData, "\l", desc="Send writeback ack to L1 indicating to drop data") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue( responseNetwork_out, ResponseMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := CoherenceResponseType:WB_ACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(ll_writebackNack, "\ll", desc="Send writeback nack to L1") {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue( responseNetwork_out, ResponseMsg, response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := CoherenceResponseType:WB_NACK;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.NumIntPendingAcks := tbe.NumIntPendingAcks + in_msg.Acks;
- }
- }
-
- action(m_decrementNumberOfMessagesExt, "\mmm", desc="Decrement the number of messages for which we're waiting") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.NumExtPendingAcks := tbe.NumExtPendingAcks - in_msg.Acks;
- }
- }
-
- action(mm_decrementNumberOfMessagesExt, "\mm", desc="Decrement the number of messages for which we're waiting") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.NumExtPendingAcks := tbe.NumExtPendingAcks - in_msg.Acks;
- }
- }
-
- action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue(clockEdge());
- }
-
-
- action(o_checkForIntCompletion, "\o", desc="Check if we have received all the messages required for completion") {
- assert(is_valid(tbe));
- if (tbe.NumIntPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:ALL_ACKS;
- }
- }
- }
-
- action(o_checkForExtCompletion, "\oo", desc="Check if we have received all the messages required for completion") {
- assert(is_valid(tbe));
- if (tbe.NumExtPendingAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:ALL_ACKS;
- }
- }
- }
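
The two completion checks above close the loop on the ack-counting convention used throughout this file: each responder that must be heard from adds one expected ack, every ACK message arrives carrying Acks = -1 (spelled 0 - 1 above, since SLICC lacks negative literals), and when the running count reaches zero the controller enqueues an ALL_ACKS trigger to itself, which later fires the All_Acks event. A hedged C++ sketch of the internal-ack half of that protocol:

#include <cstdint>
#include <queue>

using Addr = uint64_t;

struct Trigger { Addr addr; };      // stands in for TriggerMsg / ALL_ACKS
std::queue<Trigger> triggerQueue;   // the controller's own trigger queue

struct TBE { int numIntPendingAcks = 0; };

// ee_sendLocalInv-style setup: one expected ack per invalidated sharer.
void expectAcks(TBE& tbe, int sharerCount) { tbe.numIntPendingAcks += sharerCount; }

// m_decrementNumberOfMessagesInt + o_checkForIntCompletion, combined:
// each IntAck carries msgAcks = -1, driving the counter toward zero.
void onIntAck(TBE& tbe, Addr addr, int msgAcks)
{
    tbe.numIntPendingAcks += msgAcks;
    if (tbe.numIntPendingAcks == 0)
        triggerQueue.push({addr});  // the All_Acks event fires when dequeued
}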
-
-
- action( qq_sendDataFromTBEToMemory, "qq", desc="Send data from TBE to directory") {
- enqueue(globalRequestNetwork_out, RequestMsg, response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:L2Cache;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- if (tbe.Dirty) {
- out_msg.Type := CoherenceRequestType:WRITEBACK_DIRTY_DATA;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.Type := CoherenceRequestType:WRITEBACK_CLEAN_ACK;
- // NOTE: in a real system this would not send data. We send
- // data here only so we can check it at the memory
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action( r_setMRU, "\rrr", desc="manually set the MRU bit for cache line" ) {
- if(is_valid(cache_entry)) {
- L2cache.setMRU(address);
- }
- }
-
- action( s_recordGetXL1ID, "ss", desc="record local GETX requestor") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.L1_GetX_ID := in_msg.Requestor;
- }
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action( s_recordGetSL1ID, "\ss", desc="record local GETS requestor") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.L1_GetS_IDs.add(in_msg.Requestor);
- }
- }
-
- action(t_recordFwdXID, "t", desc="record global GETX requestor") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.Fwd_GetX_ID := in_msg.Requestor;
- tbe.Fwd_GETX_ExtAcks := in_msg.Acks;
- }
- }
-
- action(t_recordFwdSID, "\t", desc="record global GETS requestor") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.Fwd_GetS_IDs.clear();
- tbe.Fwd_GetS_IDs.add(in_msg.Requestor);
- }
- }
-
-
- action(u_writeCleanDataToCache, "wCd", desc="Write clean data to cache") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- assert(cache_entry.Dirty == false);
- }
- }
-
- action(u_writeDirtyDataToCache, "wDd", desc="Write dirty data to cache") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- cache_entry.Dirty := true;
- }
- }
-
- action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
-
- action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cache.deallocate(address);
- unset_cache_entry();
- }
-
- action(uu_profileMiss, "\um", desc="Profile the demand miss") {
- ++L2cache.demand_misses;
- }
-
- action(uu_profileHit, "\uh", desc="Profile the demand hit") {
- ++L2cache.demand_hits;
- }
-
- action(y_copyCacheStateToDir, "y", desc="Copy cache state to directory state") {
- copyCacheStateToDir(cache_entry, address);
- }
-
- action(y_copyDirToCacheAndRemove, "/y", desc="Copy dir state to cache and remove") {
- copyDirToCache(cache_entry, address);
- localDirectory.deallocate(address);
- }
-
-  action(zz_recycleGlobalRequestQueue, "\zglb", desc="Send the head of the global request queue to the back of the queue.") {
- peek(requestNetwork_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
-  action(zz_recycleL1RequestQueue, "\zl1", desc="Send the head of the L1 request queue to the back of the queue.") {
- peek(L1requestNetwork_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- L1requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(st_stallAndWaitL1RequestQueue, "st", desc="Stall and wait on the address") {
- stall_and_wait(L1requestNetwork_in, address);
- }
-
- action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
- wakeUpAllBuffers(address);
- }
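
st_stallAndWaitL1RequestQueue and wa_wakeUpDependents above are the two halves of SLICC's stall/wakeup mechanism: a message that cannot be processed yet is parked on a per-address side buffer instead of being recycled, and everything parked on that address is re-injected once the blocking transaction finishes. A rough C++ sketch of the idea, with an illustrative Msg type:

#include <cstdint>
#include <deque>
#include <unordered_map>

using Addr = uint64_t;

struct Msg { Addr addr; /* payload elided */ };

std::deque<Msg> requestQueue;                       // live input queue
std::unordered_map<Addr, std::deque<Msg>> stalled;  // parked, keyed by address

// stall_and_wait: move the head request aside so it no longer blocks the queue.
void stallAndWait(Addr addr)
{
    stalled[addr].push_back(requestQueue.front());
    requestQueue.pop_front();
}

// wakeUpAllBuffers: when the transaction on addr completes, requeue
// every message that was parked waiting for it.
void wakeUpAllBuffers(Addr addr)
{
    auto it = stalled.find(addr);
    if (it == stalled.end())
        return;
    for (Msg& m : it->second)
        requestQueue.push_back(m);   // re-examined on the next wakeup
    stalled.erase(it);
}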
-
- action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DMA_ACK;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:L2Cache;
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
-
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- transition({II, IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, IGMIO, OGMIO, IGMIOF, OGMIOF, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {L1_PUTO, L1_PUTS, L1_PUTS_only, L1_PUTX}) {
- st_stallAndWaitL1RequestQueue;
- }
-
- transition({II, IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, IGMIO, OGMIO, IGMIOF, OGMIOF, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {L1_GETX, L1_GETS}) {
- st_stallAndWaitL1RequestQueue;
- }
-
- transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, ILXW, OW, SW, OXW, OLSXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX,OLSXS, IGS, IGM, IGMLS, IGMO, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, L2_Replacement) {
- zz_recycleL1RequestQueue;
- }
-
- transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX,OLSXS, IGS, IGM, MM, SS, OO, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {Fwd_GETX, Fwd_GETS, Fwd_DMA}) {
- zz_recycleGlobalRequestQueue;
- }
-
- transition({OGMIO, IGMIO, IGMO}, Fwd_DMA) {
- zz_recycleGlobalRequestQueue;
- }
-
- transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX,OLSXS, MM, SS, OO, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {Inv}) {
- zz_recycleGlobalRequestQueue;
- }
-
- transition({IGM, IGS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {Own_GETX}) {
- zz_recycleGlobalRequestQueue;
- }
-
-  // must have happened because we forwarded a GETX to the local exclusive owner while it was trying to do a writeback
- transition({I, M, O, ILS, ILOX, OLS, OLSX, SLS, S}, L1_PUTX) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- transition({M}, {L1_PUTS, L1_PUTO} ) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
-  transition({ILS, OLSX}, L1_PUTO) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
-  // happens if we forwarded a GETS to the exclusive owner while it tried to do a writeback
-  // ?? should we just nack these instead? Could be a bug here
- transition(ILO, L1_PUTX, ILOW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
-  // this can happen if we forwarded an L1_GETX to the exclusive owner after it issued a PUTX
- transition(ILOS, L1_PUTX, ILOSW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
- transition(ILOSX, L1_PUTX, ILOSXW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
-  // must have happened because we got an Inv while the L1 attempted a PUTS
- transition(I, L1_PUTS) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- transition(I, L1_PUTO) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- // FORWARDED REQUESTS
-
- transition({ILO, ILX, ILOX}, Fwd_GETS, IFGS) {
- i_allocateTBE;
- t_recordFwdSID;
- j_forwardGlobalRequestToLocalOwner;
- m_popRequestQueue;
- }
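
Each transition block, like the one just above, is a (state, event) -> next-state rule whose body is an ordered action list. A hedged C++ sketch of how the ILO x Fwd_GETS -> IFGS case would execute, with the four actions reduced to illustrative stubs:

enum class L2State { ILO, IFGS /* ... */ };

struct Ctx { L2State state; };

// Illustrative stubs for the listed actions; the real ones are defined above.
void allocateTBE(Ctx&)          {}  // i_allocateTBE
void recordFwdSID(Ctx&)         {}  // t_recordFwdSID
void forwardToLocalOwner(Ctx&)  {}  // j_forwardGlobalRequestToLocalOwner
void popRequestQueue(Ctx&)      {}  // m_popRequestQueue

void onFwdGETS(Ctx& ctx)
{
    // Actions run in the order listed in the transition body...
    allocateTBE(ctx);
    recordFwdSID(ctx);
    forwardToLocalOwner(ctx);
    popRequestQueue(ctx);
    // ...then the block sits in IFGS until the local owner's Data arrives.
    ctx.state = L2State::IFGS;
}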
-
- transition({ILOS, ILOSX}, Fwd_GETS, ISFGS) {
- i_allocateTBE;
- t_recordFwdSID;
- j_forwardGlobalRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(ILOS, Fwd_DMA, ILOSD) {
- i_allocateTBE;
- jd_forwardDmaRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(ILOSD, DmaAck, ILOS) {
- s_deallocateTBE;
- da_sendDmaAckUnblock;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSX, Fwd_DMA, ILOSXD) {
- i_allocateTBE;
- t_recordFwdSID;
- jd_forwardDmaRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(ILOSXD, DmaAck, ILOSX) {
- s_deallocateTBE;
- da_sendDmaAckUnblock;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILO, Fwd_DMA, ILOD) {
- i_allocateTBE;
- t_recordFwdSID;
- jd_forwardDmaRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(ILOD, DmaAck, ILO) {
- s_deallocateTBE;
- da_sendDmaAckUnblock;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILX, Fwd_DMA, ILXD) {
- i_allocateTBE;
- t_recordFwdSID;
- jd_forwardDmaRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(ILXD, DmaAck, ILX) {
- s_deallocateTBE;
- da_sendDmaAckUnblock;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOX, Fwd_DMA, ILOXD) {
- i_allocateTBE;
- t_recordFwdSID;
- jd_forwardDmaRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(ILOXD, DmaAck, ILOX) {
- s_deallocateTBE;
- da_sendDmaAckUnblock;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition({ILOS, ILOSX, ILO, ILX, ILOX, ILXW}, Data) {
- i_copyDataToTBE;
- c_sendDataFromTBEToFwdGETS;
- s_deallocateTBE;
- n_popResponseQueue;
- }
-
- transition(IFGS, Data, ILO) {
- i_copyDataToTBE;
- c_sendDataFromTBEToFwdGETS;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ISFGS, Data, ILOS) {
- i_copyDataToTBE;
- c_sendDataFromTBEToFwdGETS;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IFGS, Data_Exclusive, I) {
- i_copyDataToTBE;
- c_sendExclusiveDataFromTBEToFwdGETS;
- gg_clearLocalSharers;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-
- transition({ILX, ILO, ILOX}, Fwd_GETX, IFGX) {
- i_allocateTBE;
- t_recordFwdXID;
- j_forwardGlobalRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(IFGX, {Data_Exclusive, Data}, I) {
- i_copyDataToTBE;
- c_sendDataFromTBEToFwdGETX;
- gg_clearLocalSharers;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition({ILOSX, ILOS}, Fwd_GETX, IFGXX) {
- i_allocateTBE;
- t_recordFwdXID;
- j_forwardGlobalRequestToLocalOwner;
- ee_sendLocalInvSharersOnly;
- ee_addLocalIntAck;
- m_popRequestQueue;
- }
-
-
- transition(IFGXX, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(IFGXX, Data_Exclusive) {
- i_copyDataToTBE;
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(IFGXX, All_Acks, I) {
- c_sendDataFromTBEToFwdGETX;
- gg_clearLocalSharers;
- s_deallocateTBE;
- n_popTriggerQueue;
- wa_wakeUpDependents;
- }
-
-
- // transition({O, OX}, Fwd_GETX, I) {
- transition(O, Fwd_GETX, I) {
- dd_sendDataToFwdGETX;
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- m_popRequestQueue;
- }
-
- transition({O, OLS}, Fwd_GETS) {
- dd_sendDataToFwdGETS;
- m_popRequestQueue;
- }
-
- transition({O, OLS}, Fwd_DMA) {
- dd_sendDataToFwdGETS;
- da_sendDmaAckUnblock;
- m_popRequestQueue;
- }
-
- // transition({OLSX, OX}, Fwd_GETS, O) {
- transition(OLSX, Fwd_GETS, OLS) {
- dd_sendDataToFwdGETS;
- m_popRequestQueue;
- }
-
- transition(OLSX, Fwd_DMA) {
- dd_sendDataToFwdGETS;
- da_sendDmaAckUnblock;
- m_popRequestQueue;
- }
-
- transition(M, Fwd_GETX, I) {
- dd_sendDataToFwdGETX;
- rr_deallocateL2CacheBlock;
- m_popRequestQueue;
- }
-
- // MAKE THIS THE SAME POLICY FOR NOW
-
- // transition(M, Fwd_GETS, O) {
- // dd_sendDataToFwdGETS;
- // m_popRequestQueue;
- // }
-
- transition(M, Fwd_GETS, I) {
- dd_sendExclusiveDataToFwdGETS;
- rr_deallocateL2CacheBlock;
- m_popRequestQueue;
- }
-
- transition(M, Fwd_DMA) {
- dd_sendExclusiveDataToFwdGETS;
- da_sendDmaAckUnblock;
- m_popRequestQueue;
- }
-
- transition({OLS, OLSX}, Fwd_GETX, OLSF) {
- i_allocateTBE;
- t_recordFwdXID;
- ee_sendLocalInv;
- m_popRequestQueue;
- }
-
- transition(OLSF, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(OLSF, All_Acks, I) {
- c_sendDataFromTBEToFwdGETX;
- gg_clearLocalSharers;
- s_deallocateTBE;
- rr_deallocateL2CacheBlock;
- n_popTriggerQueue;
- wa_wakeUpDependents;
- }
-
-
-
- // INVALIDATIONS FROM GLOBAL DIRECTORY
-
- transition({IGM, IGS}, Inv) {
- t_recordFwdXID;
- e_sendAck;
- m_popRequestQueue;
- }
-
- transition({I,NP}, Inv) {
- i_allocateTBE;
- t_recordFwdXID;
- e_sendAck;
- s_deallocateTBE;
- m_popRequestQueue;
- }
-
- // NEED INV for S state
-
- transition({ILS, ILO, ILX}, Inv, II) {
- i_allocateTBE;
- t_recordFwdXID;
- ee_sendLocalInv;
- gg_clearLocalSharers;
- m_popRequestQueue;
- }
-
- transition(SLS, Inv, II) {
- i_allocateTBE;
- t_recordFwdXID;
- ee_sendLocalInv;
- rr_deallocateL2CacheBlock;
- m_popRequestQueue;
- }
-
- transition(II, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(II, All_Acks, I) {
- e_sendAck;
- s_deallocateTBE;
- n_popTriggerQueue;
- wa_wakeUpDependents;
- }
-
- transition(S, Inv, I) {
- i_allocateTBE;
- t_recordFwdXID;
- e_sendAck;
- s_deallocateTBE;
- rr_deallocateL2CacheBlock;
- m_popRequestQueue;
- }
-
-
- // LOCAL REQUESTS SATISFIED LOCALLY
-
- transition(OLSX, L1_GETX, IFLOX) {
- i_allocateTBE;
- s_recordGetXL1ID;
-    // count the number of INVs needed, not including the requestor
- h_countLocalSharersExceptRequestor;
- // issue INVs to everyone except requestor
- ee_issueLocalInvExceptL1Requestor;
-    d_sendDataToL1GETX;
- y_copyCacheStateToDir;
- r_setMRU;
- rr_deallocateL2CacheBlock;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(IFLOX, Exclusive_Unblock, ILX) {
- g_recordLocalExclusive;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OLSX, L1_GETS, OLSXS) {
- d_sendDataToL1GETS;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(OLSXS, Unblock, OLSX) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- // after this, can't get Fwd_GETX
- transition(IGMO, Own_GETX) {
- mm_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- m_popRequestQueue;
-
- }
-
-
- transition(ILX, L1_GETS, IFLOXX) {
- kk_forwardLocalGETSToLocalOwner;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(ILOSX, L1_GETS, IFLOSX) {
- kk_forwardLocalGETSToLocalOwner;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition({ILOS, ILO}, L1_GETS, IFLO) {
- kk_forwardLocalGETSToLocalOwner;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(ILS, L1_GETS, IFLS) {
- k_forwardLocalGETSToLocalSharer;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition({ILX, ILOX}, L1_GETX, IFLOXX) {
- kk_forwardLocalGETXToLocalExclusive;
- e_sendAckToL1Requestor;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(ILOX, L1_GETS, IFLOX) {
- kk_forwardLocalGETSToLocalOwner;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(IFLOX, Unblock, ILOSX) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IFLS, Unblock, ILS) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IFLOXX, Unblock, ILOSX) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IFLOSX, Unblock, ILOSX) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition({IFLOSX, IFLOXX}, Exclusive_Unblock, ILX) {
- g_recordLocalExclusive;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IFLO, Unblock, ILOS) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-
- transition(ILOSX, L1_GETX, IFLXO) {
- i_allocateTBE;
- s_recordGetXL1ID;
- h_countLocalSharersExceptRequestor;
- ee_issueLocalInvExceptL1Requestor;
- k_forwardLocalGETXToLocalOwner;
- e_sendAckToL1RequestorFromTBE;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(IFLXO, Exclusive_Unblock, ILX) {
- g_recordLocalExclusive;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- // LOCAL REQUESTS THAT MUST ISSUE
-
- transition(NP, {L1_PUTS, L1_PUTX, L1_PUTO}) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- transition({NP, I}, L1_GETS, IGS) {
- i_allocateTBE;
- s_recordGetSL1ID;
- a_issueGETS;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition({NP, I}, L1_GETX, IGM) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(S, L1_GETX, IGM) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
- y_copyCacheStateToDir;
- r_setMRU;
- rr_deallocateL2CacheBlock;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(ILS, L1_GETX, IGMLS) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
-    // count the number of INVs needed (just sharers?), not including the requestor
- h_countLocalSharersExceptRequestor;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(IGMLS, Inv) {
- t_recordFwdXID;
- ee_sendLocalInv;
- m_popRequestQueue;
- }
-
- transition(IGMLS, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(IGMLS, All_Acks, IGM) {
- gg_clearLocalSharers;
- h_clearIntAcks;
- e_sendAck;
- n_popTriggerQueue;
- }
-
- // transition(IGMLS, ExtAck, IGMO) {
- transition(IGMLS, ExtAck) {
- m_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- n_popResponseQueue;
- }
-
- transition(IGMLS, {Data, Data_Exclusive}, IGMO) {
- ee_issueLocalInvExceptL1RequestorInTBE;
- i_copyDataToTBE;
- m_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- n_popResponseQueue;
- }
-
-
- transition(ILOS, L1_GETX, IGMIO) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
-  // a new exclusive owner appeared while a sharer attempted a writeback
- transition(ILX, {L1_PUTS, L1_PUTS_only, L1_PUTO}) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- transition(S, L1_PUTS) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- transition(OLS, L1_GETX, OGMIO) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
- h_countLocalSharersExceptRequestor;
- // COPY DATA FROM CACHE TO TBE (happens during i_allocateTBE)
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(OGMIO, Fwd_GETS) {
- t_recordFwdSID;
- c_sendDataFromTBEToFwdGETS;
- m_popRequestQueue;
- }
-
- transition(ILO, L1_GETX, IGMIO) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
-    // the following, of course, returns 0 sharers, but do it anyway for consistency
- h_countLocalSharersExceptRequestor;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition({ILO, ILOX}, L1_PUTS) {
- ll_writebackNack;
- o_popL1RequestQueue;
- }
-
- transition(IGMIO, Fwd_GETX, IGMIOF) {
- t_recordFwdXID;
- j_forwardGlobalRequestToLocalOwner;
- ee_sendLocalInvSharersOnly;
- ee_addLocalIntAck;
- m_popRequestQueue;
- }
-
- transition(IGMIO, Fwd_GETS, IGMIOFS) {
- t_recordFwdSID;
- j_forwardGlobalRequestToLocalOwner;
- m_popRequestQueue;
- }
-
- transition(IGMIOFS, Data, IGMIO) {
- i_copyDataToTBE;
- c_sendDataFromTBEToFwdGETS;
- n_popResponseQueue;
- }
-
- transition(OGMIO, Fwd_GETX, OGMIOF) {
- t_recordFwdXID;
- ee_sendLocalInvSharersOnly;
- m_popRequestQueue;
- }
-
- transition(OGMIOF, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(OGMIOF, All_Acks, IGM) {
- gg_clearLocalSharers;
- hh_countLocalSharersExceptL1GETXRequestorInTBE;
- c_sendDataFromTBEToFwdGETX;
- n_popTriggerQueue;
- }
-
- transition(IGMIOF, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(IGMIOF, Data_Exclusive) {
- i_copyDataToTBE;
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(IGMIOF, All_Acks, IGM) {
- gg_clearLocalSharers;
- c_sendDataFromTBEToFwdGETX;
- n_popTriggerQueue;
- }
-
- transition(IGMIO, All_Acks, IGMO) {
- hh_countLocalSharersExceptL1GETXRequestorInTBE;
- ee_issueLocalInvExceptL1RequestorInTBE;
- k_forwardLocalGETXToLocalOwner;
- e_sendAckToL1RequestorFromTBE;
- n_popTriggerQueue;
- }
-
- transition(OGMIO, All_Acks, IGMO) {
- ee_issueLocalInvExceptL1RequestorInTBE;
- c_sendDataFromTBEToL1GETX;
- n_popTriggerQueue;
- }
-
- transition({IGMIO, OGMIO}, Own_GETX) {
- mm_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- m_popRequestQueue;
-
- }
-
- transition(IGM, {Data, Data_Exclusive}, IGMO) {
- i_copyDataToTBE;
- m_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- n_popResponseQueue;
- }
-
- transition({IGM, IGMIO, OGMIO}, ExtAck) {
- m_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- n_popResponseQueue;
- }
-
- transition(IGMO, ExtAck) {
- m_decrementNumberOfMessagesExt;
- o_checkForExtCompletion;
- n_popResponseQueue;
- }
-
- transition(IGS, Data) {
- i_copyDataToTBE;
- m_decrementNumberOfMessagesExt;
- c_sendDataFromTBEToL1GETS;
- n_popResponseQueue;
- }
-
- transition(IGS, Data_Exclusive) {
- i_copyDataToTBE;
- m_decrementNumberOfMessagesExt;
- c_sendExclusiveDataFromTBEToL1GETS;
- n_popResponseQueue;
- }
-
- transition(IGS, Unblock, ILS) {
- g_recordLocalSharer;
- f_sendUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IGS, Exclusive_Unblock, ILX) {
- g_recordLocalExclusive;
- f_sendExclusiveUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IGMO, All_Acks) {
- c_sendDataFromTBEToL1GETX;
- n_popTriggerQueue;
- }
-
- transition(IGMO, Exclusive_Unblock, ILX) {
- g_recordLocalExclusive;
- f_sendExclusiveUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-
- transition(SLS, L1_GETX, IGMLS) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
-    // count the number of INVs needed, not including the requestor
- h_countLocalSharersExceptRequestor;
- // issue INVs to everyone except requestor
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- uu_profileMiss;
- o_popL1RequestQueue;
-
- }
-
- transition(SLS, L1_GETS, SLSS ) {
- d_sendDataToL1GETS;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(SLSS, Unblock, SLS) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-
- transition(O, L1_GETX, IGMO) {
- i_allocateTBE;
- s_recordGetXL1ID;
- a_issueGETX;
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(OLS, L1_GETS, OLSS) {
- d_sendDataToL1GETS;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(OLSS, Unblock, OLS) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(IGMO, Fwd_GETX, IGM) {
- t_recordFwdXID;
- c_sendDataFromTBEToFwdGETX;
- m_popRequestQueue;
-
- }
-
- transition(IGMO, Fwd_GETS) {
- t_recordFwdSID;
- c_sendDataFromTBEToFwdGETS;
- m_popRequestQueue;
- }
-
-
- // LOCAL REQUESTS SATISFIED DIRECTLY BY L2
-
- transition(M, L1_GETX, MM) {
- i_allocateTBE;
- // should count 0 of course
- h_countLocalSharersExceptRequestor;
- d_sendDataToL1GETX;
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- s_deallocateTBE;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(MM, Exclusive_Unblock, ILX) {
- g_recordLocalExclusive;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(M, L1_GETS, OO) {
- i_allocateTBE;
- // should count 0 of course
- h_countLocalSharersExceptRequestor;
- d_sendDataToL1GETX;
- r_setMRU;
- s_deallocateTBE;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(S, L1_GETS, SS) {
- d_sendDataToL1GETS;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(SS, Unblock, SLS) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(O, L1_GETS, OO) {
- d_sendDataToL1GETS;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(OO, Unblock, OLS) {
- g_recordLocalSharer;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OO, Exclusive_Unblock, ILX) {
-    g_recordLocalExclusive;
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-
- // L1 WRITEBACKS
- transition(ILO, L1_PUTO, ILOW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
- transition(ILOX, L1_PUTO, ILOXW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
-
- transition(ILOS, L1_PUTO, ILOSW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
- transition(ILOSX, L1_PUTO, ILOSXW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
-
- // hmmm...keep data or drop. Just drop for now
- transition(ILOS, L1_PUTS_only, ILOW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(ILSW, Unblock, ILS) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOW, Unblock, ILO) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSX, L1_PUTS_only, ILOXW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(ILOXW, Unblock, ILOX) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- // hmmm...keep data or drop. Just drop for now
- transition(ILOS, L1_PUTS, ILOSW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(ILOSX, L1_PUTS, ILOSXW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(ILOSW, Unblock, ILOS) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSXW, Unblock, ILOSX) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(SLS, L1_PUTS, SLSW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(SLS, L1_PUTS_only, SW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(SW, {Unblock}, S) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OLS, L1_PUTS, OLSW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(ILS, L1_PUTS, ILSW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
- transition(ILS, L1_PUTS_only, IW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
- transition(OLS, L1_PUTS_only, OW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(OLSX, L1_PUTS_only, OXW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(OLSX, L1_PUTS, OLSXW) {
- l_writebackAckDropData;
- o_popL1RequestQueue;
- }
-
- transition(OLSXW, {Unblock}, OLSX) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OW, {Unblock}, O) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OXW, {Unblock}, M) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-  transition(ILX, L1_PUTX, ILXW) {
- l_writebackAckNeedData;
- o_popL1RequestQueue;
- }
-
- transition(ILXW, L1_WBDIRTYDATA, M) {
- gg_clearLocalSharers;
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- u_writeDirtyDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- // clean writeback
- transition(ILXW, L1_WBCLEANDATA, M) {
- gg_clearLocalSharers;
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- u_writeCleanDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILXW, Unblock, ILX) {
- // writeback canceled because L1 invalidated
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILSW, L1_WBCLEANDATA, SLS) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- u_writeCleanDataToCache;
- gg_clearSharerFromL1Request;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(IW, L1_WBCLEANDATA, S) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- u_writeCleanDataToCache;
- gg_clearSharerFromL1Request;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- // Owner can have dirty data
- transition(ILOW, L1_WBDIRTYDATA, O) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeDirtyDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOW, L1_WBCLEANDATA, O) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeCleanDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOXW, L1_WBDIRTYDATA, M) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeDirtyDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOXW, L1_WBCLEANDATA, M) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeCleanDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSW, L1_WBDIRTYDATA, OLS) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeDirtyDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSW, L1_WBCLEANDATA, OLS) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeCleanDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSXW, L1_WBDIRTYDATA, OLSX) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeDirtyDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILOSXW, L1_WBCLEANDATA, OLSX) {
- vv_allocateL2CacheBlock;
- y_copyDirToCacheAndRemove;
- gg_clearOwnerFromL1Request;
- u_writeCleanDataToCache;
- o_popL1RequestQueue;
- wa_wakeUpDependents;
- }
-
- transition(SLSW, {Unblock}, SLS) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OLSW, {Unblock}, OLS) {
- gg_clearSharerFromL1Response;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
-
- // L2 WRITEBACKS
- transition({I, S}, L2_Replacement, I) {
- rr_deallocateL2CacheBlock;
- }
-
- transition(ILS, L2_Replacement) {
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- }
-
-  transition(ILX, L2_Replacement) {
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- }
-
-  transition({ILO, ILOS}, L2_Replacement) {
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- }
-
- transition(SLS, L2_Replacement, ILS) {
- y_copyCacheStateToDir;
- rr_deallocateL2CacheBlock;
- }
-
- transition({OLS, OLSX}, L2_Replacement, OLSI) {
- y_copyCacheStateToDir;
- b_issuePUTO_ls;
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- }
-
-
- transition(O, L2_Replacement, OI) {
- b_issuePUTO;
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- }
-
- transition(M, L2_Replacement, MI) {
- b_issuePUTX;
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- }
-
- transition(OLSI, Fwd_GETX, ILSI) {
- t_recordFwdXID;
- ee_sendLocalInv;
- m_popRequestQueue;
- }
-
- transition(ILSI, IntAck) {
- m_decrementNumberOfMessagesInt;
- o_checkForIntCompletion;
- n_popResponseQueue;
- }
-
- transition(ILSI, All_Acks, MII) {
- gg_clearLocalSharers;
- c_sendDataFromTBEToFwdGETX;
- n_popTriggerQueue;
- }
-
- transition(OLSI, Fwd_GETS) {
- t_recordFwdSID;
- c_sendDataFromTBEToFwdGETS;
- m_popRequestQueue;
- }
-
- transition({MI, OI}, Fwd_GETS, OI) {
- t_recordFwdSID;
- c_sendDataFromTBEToFwdGETS;
- m_popRequestQueue;
- }
-
- transition({MI, OI}, Fwd_DMA, OI) {
- cd_sendDataFromTBEToFwdDma;
- da_sendDmaAckUnblock;
- m_popRequestQueue;
- }
-
- transition(OLSI, Fwd_DMA) {
- cd_sendDataFromTBEToFwdDma;
- da_sendDmaAckUnblock;
- m_popRequestQueue;
- }
-
- transition({MI, OI}, Fwd_GETX, MII) {
- t_recordFwdXID;
- c_sendDataFromTBEToFwdGETX;
- m_popRequestQueue;
- }
-
- transition({MI, OI}, Writeback_Ack, I) {
- qq_sendDataFromTBEToMemory;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(MII, Writeback_Nack, I) {
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(OI, Writeback_Nack) {
- b_issuePUTO;
- n_popResponseQueue;
- }
-
- transition(OLSI, Writeback_Ack, ILS) {
- qq_sendDataFromTBEToMemory;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(MII, Writeback_Ack, I) {
- f_sendUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-
- transition(ILSI, Writeback_Ack, ILS) {
- f_sendUnblock;
- s_deallocateTBE;
- n_popResponseQueue;
- wa_wakeUpDependents;
- }
-}
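// The deleted L2 transitions above follow one recurring pattern: a request
// moves the line into a blocked transient state (e.g. SLS -> SLSS), the
// requestor's Unblock releases it, and wa_wakeUpDependents replays any
// stalled requests. A minimal standalone C++ sketch of that handshake
// (hypothetical names; an illustration, not gem5 code):

#include <cassert>
#include <deque>
#include <iostream>

enum class State { SLS, SLSS };            // stable vs. blocked-on-Unblock
struct Line { State s = State::SLS; std::deque<int> waiting; };

void onGets(Line &l, int requestor) {
    if (l.s != State::SLS) { l.waiting.push_back(requestor); return; } // stall
    // d_sendDataToL1GETS / r_setMRU would run here
    l.s = State::SLSS;                      // block until the L1's Unblock
}

void onUnblock(Line &l) {
    assert(l.s == State::SLSS);
    // g_recordLocalSharer would run here
    l.s = State::SLS;
    // wa_wakeUpDependents: replay stalled requests until one blocks again
    while (!l.waiting.empty() && l.s == State::SLS) {
        int r = l.waiting.front();
        l.waiting.pop_front();
        onGets(l, r);
    }
}

int main() {
    Line l;
    onGets(l, 1);   // serviced; line blocks awaiting requestor 1's Unblock
    onGets(l, 2);   // stalls behind the outstanding handshake
    onUnblock(l);   // releases the line and replays requestor 2
    onUnblock(l);   // requestor 2's own Unblock
    std::cout << "stalled: " << l.waiting.size() << "\n";  // prints 0
}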
+++ /dev/null
-/*
- * Copyright (c) 2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:Directory, "Directory protocol")
-: DirectoryMemory * directory;
- Cycles directory_latency := 6;
- Cycles to_memory_controller_latency := 1;
-
- // Message Queues
- MessageBuffer * requestToDir, network="From", virtual_network="1",
- vnet_type="request"; // a mod-L2 bank -> this Dir
- MessageBuffer * responseToDir, network="From", virtual_network="2",
- vnet_type="response"; // a mod-L2 bank -> this Dir
-
- MessageBuffer * forwardFromDir, network="To", virtual_network="1",
- vnet_type="forward";
- MessageBuffer * responseFromDir, network="To", virtual_network="2",
- vnet_type="response"; // Dir -> mod-L2 bank
-
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, AccessPermission:Read_Write, desc="Invalid";
- S, AccessPermission:Read_Only, desc="Shared";
- O, AccessPermission:Maybe_Stale, desc="Owner";
- M, AccessPermission:Maybe_Stale, desc="Modified";
-
- IS, AccessPermission:Busy, desc="Blocked, was in idle";
- SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
- OO, AccessPermission:Busy, desc="Blocked, was in owned";
- MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
- MM, AccessPermission:Busy, desc="Blocked, going to modified";
-
- MI, AccessPermission:Busy, desc="Blocked on a writeback";
- MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
- OS, AccessPermission:Busy, desc="Blocked on a writeback";
- OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
-
- XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
- XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
- OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";
-
- OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
- MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- GETX, desc="A GETX arrives";
- GETS, desc="A GETS arrives";
- PUTX, desc="A PUTX arrives";
- PUTO, desc="A PUTO arrives";
- PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
- Unblock, desc="An unblock message arrives";
- Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
-    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
- Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
- Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
- DMA_READ, desc="DMA Read";
- DMA_WRITE, desc="DMA Write";
- DMA_ACK, desc="DMA Ack";
- Data, desc="Data to directory";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface='AbstractEntry') {
- State DirectoryState, desc="Directory state";
- NetDest Sharers, desc="Sharers for this block";
- NetDest Owner, desc="Owner of this block";
- int WaitingUnblocks, desc="Number of acks we're waiting for";
- }
-
- structure(TBE, desc="...") {
- Addr PhysicalAddress, desc="Physical address for this entry";
- int Len, desc="Length of request";
- DataBlock DataBlk, desc="DataBlk";
- MachineID Requestor, desc="original requestor";
- }
-
- structure(TBETable, external = "yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- // ** OBJECTS **
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- void set_tbe(TBE b);
- void unset_tbe();
-
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
-
- if (is_valid(dir_entry)) {
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
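// getDirectoryEntry above allocates a directory entry on first touch rather
// than pre-populating one per memory block, so untouched addresses cost
// nothing. A comparable lazy lookup in plain C++ (sketch, hypothetical
// types):

#include <cstdint>
#include <unordered_map>

struct DirEntry { int state = 0; /* starts in I */ };

DirEntry &getDirEntry(std::unordered_map<uint64_t, DirEntry> &dir,
                      uint64_t addr) {
    // operator[] default-constructs the entry on a miss, mirroring
    // directory.allocate(addr, new Entry) in the SLICC version.
    return dir[addr];
}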
-
- State getState(TBE tbe, Addr addr) {
- return getDirectoryEntry(addr).DirectoryState;
- }
-
- void setState(TBE tbe, Addr addr, State state) {
- if (directory.isPresent(addr)) {
-
- if (state == State:I) {
- assert(getDirectoryEntry(addr).Owner.count() == 0);
- assert(getDirectoryEntry(addr).Sharers.count() == 0);
- }
-
- if (state == State:S) {
- assert(getDirectoryEntry(addr).Owner.count() == 0);
- }
-
- if (state == State:O) {
- assert(getDirectoryEntry(addr).Owner.count() == 1);
- assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
- }
-
- if (state == State:M) {
- assert(getDirectoryEntry(addr).Owner.count() == 1);
- assert(getDirectoryEntry(addr).Sharers.count() == 0);
- }
-
- if ((state != State:SS) && (state != State:OO)) {
- assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
- }
-
- if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
- getDirectoryEntry(addr).DirectoryState := state;
- // disable coherence checker
- // sequencer.checkCoherence(addr);
- }
- else {
- getDirectoryEntry(addr).DirectoryState := state;
- }
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- if (directory.isPresent(addr)) {
- DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- if (directory.isPresent(addr)) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- functionalMemoryRead(pkt);
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- // if no sharers, then directory can be considered
- // both a sharer and exclusive w.r.t. coherence checking
- bool isBlockShared(Addr addr) {
- if (directory.isPresent(addr)) {
- if (getDirectoryEntry(addr).DirectoryState == State:I) {
- return true;
- }
- }
- return false;
- }
-
- bool isBlockExclusive(Addr addr) {
- if (directory.isPresent(addr)) {
- if (getDirectoryEntry(addr).DirectoryState == State:I) {
- return true;
- }
- }
- return false;
- }
-
- // ** OUT_PORTS **
- out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-
- // ** IN_PORTS **
-
- in_port(unblockNetwork_in, ResponseMsg, responseToDir, rank=2) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, ResponseMsg) {
- if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
- trigger(Event:Last_Unblock, in_msg.addr,
- TBEs[in_msg.addr]);
- } else {
- trigger(Event:Unblock, in_msg.addr,
- TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
- trigger(Event:Exclusive_Unblock, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Data, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
- trigger(Event:DMA_ACK, in_msg.addr,
- TBEs[in_msg.addr]);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
- if (requestQueue_in.isReady(clockEdge())) {
- peek(requestQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
- trigger(Event:Dirty_Writeback, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_CLEAN_ACK) {
- trigger(Event:Clean_Writeback, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
- trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
- trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=0) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
- action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:WB_ACK;
- out_msg.Sender := in_msg.Requestor;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:WB_NACK;
- out_msg.Sender := in_msg.Requestor;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(c_clearOwner, "c", desc="Clear the owner field") {
- getDirectoryEntry(address).Owner.clear();
- }
-
- action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
- getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
- getDirectoryEntry(address).Owner.clear();
- }
-
- action(cc_clearSharers, "\c", desc="Clear the sharers field") {
- getDirectoryEntry(address).Sharers.clear();
- }
-
- action(d_sendDataMsg, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := in_msg.Acks;
- if (in_msg.ReadX) {
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
-  action(p_fwdDataToDMA, "\d", desc="Forward data to the DMA requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
- peek(unblockNetwork_in, ResponseMsg) {
- getDirectoryEntry(address).Owner.clear();
- getDirectoryEntry(address).Owner.add(in_msg.Sender);
- }
- }
-
- action(f_forwardRequest, "f", desc="Forward request to owner") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
- out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
- out_msg.Acks := getDirectoryEntry(address).Sharers.count();
- if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- }
- }
- }
-
- action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
- out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
- out_msg.Acks := getDirectoryEntry(address).Sharers.count();
- if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- }
- }
- }
-
- action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
- peek(requestQueue_in, RequestMsg) {
- if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) ||
- ((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) &&
- (getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) {
- enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
- // out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers;
- out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers);
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
- }
- }
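// f_forwardRequest and g_sendInvalidations compute the ack count the
// requestor must collect: every sharer acks, except the requestor itself
// if it already sits on the sharer list. That arithmetic in isolation
// (sketch, hypothetical set type):

#include <set>

int expectedAcks(const std::set<int> &sharers, int requestor) {
    int acks = static_cast<int>(sharers.size());
    if (sharers.count(requestor))
        acks -= 1;            // don't wait for an ack from yourself
    return acks;
}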
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue(clockEdge());
- }
-
- action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
- peek(unblockNetwork_in, ResponseMsg) {
- getDirectoryEntry(address).Sharers.add(in_msg.Sender);
- }
- }
-
- action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
- getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
- }
-
- action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
- getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
- assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
- }
-
- action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestQueue_in, RequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
- action(qw_queueMemoryWBFromCacheRequest, "qw", desc="Queue off-chip writeback request") {
- peek(requestQueue_in, RequestMsg) {
- if (is_valid(tbe)) {
- queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
- in_msg.DataBlk);
- } else {
- queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
- }
-
- action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
- desc="Queue off-chip writeback request") {
- peek(unblockNetwork_in, ResponseMsg) {
- DataBlock DataBlk := in_msg.DataBlk;
- DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress),
- tbe.Len);
- queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
- DataBlk);
- }
- }
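// qw_queueMemoryWBRequestFromMessageAndTBE merges the DMA's partial write
// (tbe.DataBlk, tbe.Len bytes at getOffset(tbe.PhysicalAddress)) into the
// full line the owner returned, before writing the result back to memory.
// The same merge in plain C++ (a sketch assuming 64-byte lines; not the
// Ruby DataBlock API):

#include <array>
#include <cstddef>
#include <cstring>

constexpr std::size_t kLineBytes = 64;
using Block = std::array<unsigned char, kLineBytes>;

void copyPartial(Block &line, const Block &dmaData,
                 std::size_t offset, std::size_t len) {
    // Overwrite only the bytes the DMA actually wrote; the rest of the
    // line keeps the owner's (possibly dirty) data.
    std::memcpy(line.data() + offset, dmaData.data() + offset, len);
}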
-
- action(qw_queueMemoryWBFromDMARequest, "/qw", desc="Queue off-chip writeback request") {
- peek(requestQueue_in, RequestMsg) {
- queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- out_msg.Type := CoherenceResponseType:DMA_ACK;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
- peek(unblockNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:Directory;
- if (is_valid(tbe)) {
- out_msg.Destination.add(tbe.Requestor);
- }
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- out_msg.Type := CoherenceResponseType:DMA_ACK;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE entry") {
- peek (requestQueue_in, RequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.PhysicalAddress := in_msg.addr;
- tbe.Len := in_msg.Len;
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Requestor := in_msg.Requestor;
- }
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
-
- // TRANSITIONS
- transition(I, GETX, MM) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(I, DMA_READ, XI_M) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(I, DMA_WRITE, XI_U) {
- qw_queueMemoryWBFromDMARequest;
- a_sendDMAAck; // ack count may be zero
- i_popIncomingRequestQueue;
- }
-
- transition(XI_M, Memory_Data, I) {
- d_sendDataMsg; // ack count may be zero
- q_popMemQueue;
- }
-
- transition(XI_U, Exclusive_Unblock, I) {
- cc_clearSharers;
- c_clearOwner;
- j_popIncomingUnblockQueue;
- }
-
- transition(S, GETX, MM) {
- qf_queueMemoryFetchRequest;
- g_sendInvalidations;
- i_popIncomingRequestQueue;
- }
-
- transition(S, DMA_READ) {
- //qf_queueMemoryFetchRequest;
- p_fwdDataToDMA;
- //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
- i_popIncomingRequestQueue;
- }
-
- transition(S, DMA_WRITE, XI_U) {
- qw_queueMemoryWBFromDMARequest;
- a_sendDMAAck; // ack count may be zero
- g_sendInvalidations; // the DMA will collect invalidations
- i_popIncomingRequestQueue;
- }
-
- transition(I, GETS, IS) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition({S, SS}, GETS, SS) {
- qf_queueMemoryFetchRequest;
- n_incrementOutstanding;
- i_popIncomingRequestQueue;
- }
-
- transition({I, S}, PUTO) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition({I, S, O}, PUTX) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition(O, GETX, MM) {
- f_forwardRequest;
- g_sendInvalidations;
- i_popIncomingRequestQueue;
- }
-
- transition(O, DMA_READ, OD) {
- f_forwardRequest; // this will cause the data to go to DMA directly
- //g_sendInvalidations; // this will cause acks to be sent to the DMA
- i_popIncomingRequestQueue;
- }
-
- transition(OD, DMA_ACK, O) {
- j_popIncomingUnblockQueue;
- }
-
- transition({O,M}, DMA_WRITE, OI_D) {
- f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
- g_sendInvalidations; // these go to the DMA Controller
- v_allocateTBE;
- i_popIncomingRequestQueue;
- }
-
- transition(OI_D, Data, XI_U) {
- qw_queueMemoryWBRequestFromMessageAndTBE;
- a_sendDMAAck2; // ack count may be zero
- w_deallocateTBE;
- j_popIncomingUnblockQueue;
- }
-
- transition({O, OO}, GETS, OO) {
- f_forwardRequest;
- n_incrementOutstanding;
- i_popIncomingRequestQueue;
- }
-
- transition(M, GETX, MM) {
- f_forwardRequest;
- i_popIncomingRequestQueue;
- }
-
- // no exclusive unblock will show up to the directory
- transition(M, DMA_READ, MD) {
- f_forwardRequest; // this will cause the data to go to DMA directly
- i_popIncomingRequestQueue;
- }
-
- transition(MD, DMA_ACK, M) {
- j_popIncomingUnblockQueue;
- }
-
- transition(M, GETS, MO) {
- f_forwardRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTX, MI) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- // happens if M->O transition happens on-chip
- transition(M, PUTO, MI) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTO_SHARERS, MIS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(O, PUTO, OS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(O, PUTO_SHARERS, OSS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
-
- transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
- zz_recycleRequest;
- }
-
- transition({MM, MO}, Exclusive_Unblock, M) {
- cc_clearSharers;
- e_ownerIsUnblocker;
- j_popIncomingUnblockQueue;
- }
-
- transition(MO, Unblock, O) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
- zz_recycleRequest;
- }
-
- transition(IS, GETS) {
- zz_recycleRequest;
- }
-
- transition(IS, Unblock, S) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(IS, Exclusive_Unblock, M) {
- cc_clearSharers;
- e_ownerIsUnblocker;
- j_popIncomingUnblockQueue;
- }
-
- transition(SS, Unblock) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(SS, Last_Unblock, S) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(OO, Unblock) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(OO, Last_Unblock, O) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(MI, Dirty_Writeback, I) {
- c_clearOwner;
- cc_clearSharers;
- qw_queueMemoryWBFromCacheRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(MIS, Dirty_Writeback, S) {
- c_moveOwnerToSharer;
- qw_queueMemoryWBFromCacheRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(MIS, Clean_Writeback, S) {
- c_moveOwnerToSharer;
- i_popIncomingRequestQueue;
- }
-
- transition(OS, Dirty_Writeback, S) {
- c_clearOwner;
- qw_queueMemoryWBFromCacheRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(OSS, Dirty_Writeback, S) {
- c_moveOwnerToSharer;
- qw_queueMemoryWBFromCacheRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(OSS, Clean_Writeback, S) {
- c_moveOwnerToSharer;
- i_popIncomingRequestQueue;
- }
-
- transition(MI, Clean_Writeback, I) {
- c_clearOwner;
- cc_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- transition(OS, Clean_Writeback, S) {
- c_clearOwner;
- i_popIncomingRequestQueue;
- }
-
- transition({MI, MIS}, Unblock, M) {
- j_popIncomingUnblockQueue;
- }
-
- transition({OS, OSS}, Unblock, O) {
- j_popIncomingUnblockQueue;
- }
-
- transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
- d_sendDataMsg;
- q_popMemQueue;
- }
-
- transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
- //a_sendAck;
- q_popMemQueue;
- }
-
-}
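// While multiple GETS are in flight the directory above sits in SS/OO and
// counts outstanding unblocks; the unblock in_port turns the final one into
// Last_Unblock, which is what moves the directory back to a stable state.
// A compact model of that counter (sketch; not gem5 code):

#include <cassert>

struct DirLine {
    int waitingUnblocks = 0;
    bool blocked() const { return waitingUnblocks > 0; }
};

void onGets(DirLine &d)    { ++d.waitingUnblocks; }   // n_incrementOutstanding
bool onUnblock(DirLine &d) {                          // returns "was last"
    assert(d.waitingUnblocks > 0);
    --d.waitingUnblocks;                              // o_decrementOutstanding
    return d.waitingUnblocks == 0;                    // Last_Unblock -> S/O
}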
+++ /dev/null
-/*
- * Copyright (c) 2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Copyright (c) 2009-2013 Mark D. Hill and David A. Wood
- * Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:DMA, "DMA Controller")
- : DMASequencer * dma_sequencer;
- Cycles request_latency := 14;
- Cycles response_latency := 14;
-
- MessageBuffer * responseFromDir, network="From", virtual_network="2",
- vnet_type="response";
-
- MessageBuffer * reqToDir, network="To", virtual_network="1",
- vnet_type="request";
- MessageBuffer * respToDir, network="To", virtual_network="2",
- vnet_type="dmaresponse";
-
- MessageBuffer * mandatoryQueue;
- MessageBuffer * triggerQueue;
-{
- state_declaration(State, desc="DMA states", default="DMA_State_READY") {
- READY, AccessPermission:Invalid, desc="Ready to accept a new request";
-    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a read request";
-    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a write request";
- }
-
- enumeration(Event, desc="DMA events") {
- ReadRequest, desc="A new read request";
- WriteRequest, desc="A new write request";
- Data, desc="Data from a DMA memory read";
- DMA_Ack, desc="DMA write to memory completed";
- Inv_Ack, desc="Invalidation Ack from a sharer";
- All_Acks, desc="All acks received";
- }
-
- structure(TBE, desc="...") {
- Addr address, desc="Physical address";
- int NumAcks, default="0", desc="Number of Acks pending";
- DataBlock DataBlk, desc="Data";
- }
-
- structure(TBETable, external = "yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
- State cur_state;
-
- Tick clockEdge();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- State getState(TBE tbe, Addr addr) {
- return cur_state;
- }
- void setState(TBE tbe, Addr addr, State state) {
- cur_state := state;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("DMA does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("DMA does not support functional write.");
- }
-
- out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
- out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
- out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
-
- in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, rank=2) {
- if (dmaResponseQueue_in.isReady(clockEdge())) {
-      peek(dmaResponseQueue_in, ResponseMsg) {
- if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
- trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
- in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
- } else {
- error("Invalid response type");
- }
- }
- }
- }
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=1) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, rank=0) {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, SequencerMsg) {
-        if (in_msg.Type == SequencerRequestType:LD) {
- trigger(Event:ReadRequest, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == SequencerRequestType:ST) {
- trigger(Event:WriteRequest, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid request type");
- }
- }
- }
- }
-
- action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, RequestMsg, request_latency) {
- out_msg.addr := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:DMA_READ;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:DMA;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, RequestMsg, request_latency) {
- out_msg.addr := in_msg.PhysicalAddress;
- out_msg.Type := CoherenceRequestType:DMA_WRITE;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Requestor := machineID;
- out_msg.RequestorMachine := MachineType:DMA;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
- dma_sequencer.ackCallback(address);
- }
-
- action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
- assert(is_valid(tbe));
- if (tbe.NumAcks == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:ALL_ACKS;
- }
- }
- }
-
- action(u_updateAckCount, "u", desc="Update ack count") {
- peek(dmaResponseQueue_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
- }
- }
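// u_updateAckCount folds each incoming Acks field into the TBE's signed
// pending-ack count; individual Inv_Acks and the directory's DMA_ACK
// (which carries the sharer count) contribute with opposite signs, which
// is why the BUSY_WR DMA_Ack transition below notes the count "actually
// increases". Completion is exactly when the running sum returns to zero,
// regardless of arrival order. A sketch of that bookkeeping (hypothetical
// types; not gem5 code):

struct DmaTbe { int numAcks = 0; };

// Apply one response's (signed) ack contribution; returns true once every
// expected ack has been accounted for.
bool applyAcks(DmaTbe &tbe, int acks) {
    tbe.numAcks -= acks;      // mirrors tbe.NumAcks := tbe.NumAcks - in_msg.Acks
    return tbe.numAcks == 0;  // o_checkForCompletion would fire ALL_ACKS
}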
-
-  action(u_sendExclusiveUnblockToDir, "\u", desc="Send exclusive unblock to directory") {
- enqueue(respToDirectory_out, ResponseMsg, response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:DMA;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
-  action(p_popResponseQueue, "\p", desc="Pop response queue") {
- dmaResponseQueue_in.dequeue(clockEdge());
- }
-
- action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(t_updateTBEData, "t", desc="Update TBE Data") {
- peek(dmaResponseQueue_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
- assert(is_valid(tbe));
- dma_sequencer.dataCallback(tbe.DataBlk, address);
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE entry") {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
- stall_and_wait(dmaRequestQueue_in, address);
- }
-
- action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
- transition(READY, ReadRequest, BUSY_RD) {
- s_sendReadRequest;
- v_allocateTBE;
- p_popRequestQueue;
- }
-
- transition(BUSY_RD, Inv_Ack) {
- u_updateAckCount;
- o_checkForCompletion;
- p_popResponseQueue;
- }
-
- transition(BUSY_RD, Data, READY) {
- t_updateTBEData;
- d_dataCallbackFromTBE;
- w_deallocateTBE;
- //u_updateAckCount;
- //o_checkForCompletion;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition(BUSY_RD, All_Acks, READY) {
- d_dataCallbackFromTBE;
- //u_sendExclusiveUnblockToDir;
- w_deallocateTBE;
- p_popTriggerQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition(READY, WriteRequest, BUSY_WR) {
- s_sendWriteRequest;
- v_allocateTBE;
- p_popRequestQueue;
- }
-
- transition(BUSY_WR, Inv_Ack) {
- u_updateAckCount;
- o_checkForCompletion;
- p_popResponseQueue;
- }
-
- transition(BUSY_WR, DMA_Ack) {
- u_updateAckCount; // actually increases
- o_checkForCompletion;
- p_popResponseQueue;
- }
-
- transition(BUSY_WR, All_Acks, READY) {
- a_ackCallback;
- u_sendExclusiveUnblockToDir;
- w_deallocateTBE;
- p_popTriggerQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
- zz_stallAndWaitRequestQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- GETS, desc="Get Shared";
- PUTX, desc="Put eXclusive";
- PUTO, desc="Put Owned";
- PUTO_SHARERS, desc="Put Owned, but sharers exist so don't remove from sharers list";
- PUTS, desc="Put Shared";
- INV, desc="Invalidation";
- WRITEBACK_CLEAN_DATA, desc="Clean writeback (contains data)";
- WRITEBACK_CLEAN_ACK, desc="Clean writeback (contains no data)";
- WRITEBACK_DIRTY_DATA, desc="Dirty writeback (contains data)";
- DMA_READ, desc="DMA Read";
- DMA_WRITE, desc="DMA Write";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- ACK, desc="ACKnowledgment, responder doesn't have a copy";
- DATA, desc="Data";
- DATA_EXCLUSIVE, desc="Data, no processor has a copy";
- UNBLOCK, desc="Unblock";
- UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
- WB_ACK, desc="Writeback ack";
-  WB_ACK_DATA, desc="Writeback ack (contains data)";
- WB_NACK, desc="Writeback neg. ack";
- DMA_ACK, desc="Ack that a DMA write completed";
-}
-
-// TriggerType
-enumeration(TriggerType, desc="...") {
- ALL_ACKS, desc="See corresponding event";
-}
-
-// TriggerMsg
-structure(TriggerMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- TriggerType Type, desc="Type of trigger";
-
- bool functionalRead(Packet *pkt) {
- // Trigger message does not hold data
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Trigger message does not hold data
- return false;
- }
-}
-
-// RequestMsg (and also forwarded requests)
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- int Len, desc="Length of Request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- MachineID Requestor, desc="Node who initiated the request";
- MachineType RequestorMachine, desc="type of component";
- NetDest Destination, desc="Multicast destination mask";
- DataBlock DataBlk, desc="data for the cache line (DMA WRITE request)";
- int Acks, desc="How many acks to expect";
- MessageSizeType MessageSize, desc="size category of the message";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-
- bool functionalRead(Packet *pkt) {
- // Read only those messages that contain the data
- if (Type == CoherenceRequestType:DMA_READ ||
- Type == CoherenceRequestType:DMA_WRITE ||
- Type == CoherenceRequestType:WRITEBACK_CLEAN_DATA ||
- Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
- return testAndRead(addr, DataBlk, pkt);
- }
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check required since all messages are written
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
-
-// ResponseMsg (and also unblock requests)
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="Node who sent the data";
- MachineType SenderMachine, desc="type of component sending msg";
- NetDest Destination, desc="Node to whom the data is sent";
- DataBlock DataBlk, desc="data for the cache line";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int Acks, desc="How many acks to expect";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- // Read only those messages that contain the data
- if (Type == CoherenceResponseType:DATA ||
- Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- return testAndRead(addr, DataBlk, pkt);
- }
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check required since all messages are written
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
+++ /dev/null
-protocol "MOESI_CMP_directory";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_CMP_directory-msg.sm";
-include "MOESI_CMP_directory-L1cache.sm";
-include "MOESI_CMP_directory-L2cache.sm";
-include "MOESI_CMP_directory-dma.sm";
-include "MOESI_CMP_directory-dir.sm";
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id: MOESI_CMP_token-L1cache.sm 1.22 05/01/19 15:55:39-06:00 beckmann@s0-28.cs.wisc.edu $
- *
- */
-
-machine(MachineType:L1Cache, "Token protocol")
- : Sequencer * sequencer;
- CacheMemory * L1Icache;
- CacheMemory * L1Dcache;
- int l2_select_num_bits;
- int N_tokens;
-
- Cycles l1_request_latency := 2;
- Cycles l1_response_latency := 2;
- int retry_threshold := 1;
- Cycles fixed_timeout_latency := 100;
- Cycles reissue_wakeup_latency := 10;
- Cycles use_timeout_latency := 50;
-
- bool dynamic_timeout_enabled := "True";
- bool no_mig_atomic := "True";
- bool send_evictions;
-
- // Message Queues
- // From this node's L1 cache TO the network
-
- // a local L1 -> this L2 bank
- MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
- vnet_type="response";
- MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
- vnet_type="persistent";
- // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
- MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",
- vnet_type="request";
-
- // To this node's L1 cache FROM the network
-
- // a L2 bank -> this L1
- MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
- vnet_type="response";
- MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
- vnet_type="persistent";
- // a L2 bank -> this L1
- MessageBuffer * requestToL1Cache, network="From", virtual_network="1",
- vnet_type="request";
-
- MessageBuffer * mandatoryQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- NP, AccessPermission:Invalid, "NP", desc="Not Present";
- I, AccessPermission:Invalid, "I", desc="Idle";
- S, AccessPermission:Read_Only, "S", desc="Shared";
- O, AccessPermission:Read_Only, "O", desc="Owned";
- M, AccessPermission:Read_Only, "M", desc="Modified (dirty)";
- MM, AccessPermission:Read_Write, "MM", desc="Modified (dirty and locally modified)";
- M_W, AccessPermission:Read_Only, "M^W", desc="Modified (dirty), waiting";
- MM_W, AccessPermission:Read_Write, "MM^W", desc="Modified (dirty and locally modified), waiting";
-
- // Transient States
- IM, AccessPermission:Busy, "IM", desc="Issued GetX";
- SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
- OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
- IS, AccessPermission:Busy, "IS", desc="Issued GetS";
-
- // Locked states
- I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
- S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
- IM_L, AccessPermission:Busy, "IM^L", desc="Invalid, Locked, trying to go to Modified";
- SM_L, AccessPermission:Busy, "SM^L", desc="Shared, Locked, trying to go to Modified";
- IS_L, AccessPermission:Busy, "IS^L", desc="Invalid, Locked, trying to go to Shared";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- Load, desc="Load request from the processor";
- Ifetch, desc="I-fetch request from the processor";
- Store, desc="Store request from the processor";
- Atomic, desc="Atomic request from the processor";
- L1_Replacement, desc="L1 Replacement";
-
- // Responses
- Data_Shared, desc="Received a data message, we are now a sharer";
- Data_Owner, desc="Received a data message, we are now the owner";
- Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
- Ack, desc="Received an ack message";
- Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
-
- // Requests
- Transient_GETX, desc="A GetX from another processor";
- Transient_Local_GETX, desc="A GetX from another processor";
- Transient_GETS, desc="A GetS from another processor";
- Transient_Local_GETS, desc="A GetS from another processor";
-    Transient_GETS_Last_Token, desc="A GetS from another processor while we hold the last token";
-    Transient_Local_GETS_Last_Token, desc="A GetS from another processor while we hold the last token";
-
- // Lock/Unlock for distributed
- Persistent_GETX, desc="Another processor has priority to read/write";
- Persistent_GETS, desc="Another processor has priority to read";
- Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
- Own_Lock_or_Unlock, desc="This processor now has priority";
-
- // Triggers
- Request_Timeout, desc="Timeout";
- Use_TimeoutStarverX, desc="Timeout";
- Use_TimeoutStarverS, desc="Timeout";
- Use_TimeoutNoStarvers, desc="Timeout";
- Use_TimeoutNoStarvers_NoMig, desc="Timeout Don't Migrate";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int Tokens, desc="The number of tokens we're holding for the line";
- DataBlock DataBlk, desc="data for the block";
- }
-
-
- // TBE fields
- structure(TBE, desc="...") {
- Addr addr, desc="Physical address for this TBE";
- State TBEState, desc="Transient state";
- int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
- Addr PC, desc="Program counter of request";
-
- bool WentPersistent, default="false", desc="Request went persistent";
- bool ExternalResponse, default="false", desc="Response came from an external controller";
- bool IsAtomic, default="false", desc="Request was an atomic request";
-
- AccessType TypeOfAccess, desc="Type of request (used for profiling)";
- Cycles IssueTime, desc="Time the request was issued";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- structure(PersistentTable, external="yes") {
- void persistentRequestLock(Addr, MachineID, AccessType);
- void persistentRequestUnlock(Addr, MachineID);
- bool okToIssueStarving(Addr, MachineID);
- MachineID findSmallest(Addr);
- AccessType typeOfSmallest(Addr);
- void markEntries(Addr);
- bool isLocked(Addr);
- int countStarvingForAddress(Addr);
- int countReadStarvingForAddress(Addr);
- }
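-
- // The persistent table implements the protocol's starvation-avoidance
- // path: once a request has been reissued retry_threshold times, the
- // requestor broadcasts a persistent GETS/GETX that locks the address
- // until it sends DEACTIVATE_PERSISTENT (see a_issueReadRequest,
- // b_issueWriteRequest, and s_deallocateTBE below).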
-
- Tick clockEdge();
- Tick cyclesToTicks(Cycles c);
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- TBETable L1_TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
-
- bool starving, default="false";
- int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- PersistentTable persistentTable;
- TimerTable useTimerTable;
- TimerTable reissueTimerTable;
-
- int outstandingRequests, default="0";
- int outstandingPersistentRequests, default="0";
-
- // Constant that provides hysteresis for calculating the estimated average
- int averageLatencyHysteresis, default="(8)";
- Cycles averageLatencyCounter,
- default="(Cycles(500) << (*m_averageLatencyHysteresis_ptr))";
-
- Cycles averageLatencyEstimate() {
- DPRINTF(RubySlicc, "%d\n",
- (averageLatencyCounter >> averageLatencyHysteresis));
- return averageLatencyCounter >> averageLatencyHysteresis;
- }
-
- void updateAverageLatencyEstimate(Cycles latency) {
- DPRINTF(RubySlicc, "%d\n", latency);
-
- // By subtracting the current average and then adding the most
- // recent sample, we calculate an estimate of the recent average.
- // If we simply used a running sum and divided by the total number
- // of entries, the estimate of the average would adapt very slowly
- // after the execution has run for a long time.
-
- averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
- }
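-
- // A worked example of the update above, using the defaults declared
- // earlier: with averageLatencyHysteresis = 8 the estimate is
- // counter >> 8, i.e. counter/256, and seeding counter = 500 << 8 makes
- // the initial estimate 500 cycles. A sample with latency = 244 gives
- // counter := 128000 - 500 + 244 = 127744, estimate = 499, so each sample
- // moves the estimate by roughly 1/256 of (sample - estimate).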
-
- Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
- if(is_valid(L1Dcache_entry)) {
- return L1Dcache_entry;
- }
-
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
- return L1Icache_entry;
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
- return L1Dcache_entry;
- }
-
- Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
- return L1Icache_entry;
- }
-
- int getTokens(Entry cache_entry) {
- if (is_valid(cache_entry)) {
- return cache_entry.Tokens;
- }
- return 0;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
-
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- } else {
- if (persistentTable.isLocked(addr) && (persistentTable.findSmallest(addr) != machineID)) {
- // Not in cache, in persistent table, but this processor isn't highest priority
- return State:I_L;
- } else {
- return State:NP;
- }
- }
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
-
- if (is_valid(tbe)) {
- assert(state != State:I);
- assert(state != State:S);
- assert(state != State:O);
- assert(state != State:MM);
- assert(state != State:M);
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- // Make sure the token count is in range
- assert(cache_entry.Tokens >= 0);
- assert(cache_entry.Tokens <= max_tokens());
- assert(cache_entry.Tokens != (max_tokens() / 2));
-
- if ((state == State:I_L) ||
- (state == State:IM_L) ||
- (state == State:IS_L)) {
- // Make sure we have no tokens in the "Invalid, locked" states
- assert(cache_entry.Tokens == 0);
-
- // Make sure the line is locked
- // assert(persistentTable.isLocked(addr));
-
- // But we shouldn't have highest priority for it
- // assert(persistentTable.findSmallest(addr) != id);
-
- } else if ((state == State:S_L) ||
- (state == State:SM_L)) {
- assert(cache_entry.Tokens >= 1);
- assert(cache_entry.Tokens < (max_tokens() / 2));
-
- // Make sure the line is locked...
- // assert(persistentTable.isLocked(addr));
-
- // ...But we shouldn't have highest priority for it...
- // assert(persistentTable.findSmallest(addr) != id);
-
- // ...And it must be a GETS request
- // assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);
-
- } else {
-
- // If there is an entry in the persistent table for this block,
- // this processor needs to have an entry in the table for this
- // block, and that entry had better be the smallest (highest
- // priority). Otherwise, the state should have been one of the
- // locked states.
-
- //if (persistentTable.isLocked(addr)) {
- // assert(persistentTable.findSmallest(addr) == id);
- //}
- }
-
- // in M-like states (M, MM, M^W, MM^W) you have all the tokens
- if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
- assert(cache_entry.Tokens == max_tokens());
- }
-
- // in NP you have no tokens
- if (state == State:NP) {
- assert(cache_entry.Tokens == 0);
- }
-
- // You have at least one token in S-like states
- if (state == State:S || state == State:SM) {
- assert(cache_entry.Tokens > 0);
- }
-
- // You have more than half the tokens in O-like states
- if (state == State:O || state == State:OM) {
- assert(cache_entry.Tokens > (max_tokens() / 2));
- }
-
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := L1_TBEs[addr];
- if(is_valid(tbe)) {
- return L1Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return L1Cache_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L1Cache_State_to_permission(state));
- }
- }
-
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Ifetch;
- } else if (type == RubyRequestType:ST) {
- return Event:Store;
- } else if (type == RubyRequestType:ATOMIC) {
- if (no_mig_atomic) {
- return Event:Atomic;
- } else {
- return Event:Store;
- }
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- AccessType cache_request_type_to_access_type(RubyRequestType type) {
- if ((type == RubyRequestType:LD) || (type == RubyRequestType:IFETCH)) {
- return AccessType:Read;
- } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
- return AccessType:Write;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- // NOTE: direct local hits should not call this function
- bool isExternalHit(Addr addr, MachineID sender) {
- if (machineIDToMachineType(sender) == MachineType:L1Cache) {
- return true;
- } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
-
- if (sender == mapAddressToRange(addr, MachineType:L2Cache,
- l2_select_low_bit, l2_select_num_bits, intToID(0))) {
- return false;
- } else {
- return true;
- }
- }
-
- return true;
- }
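-
- // For example, under the single-chip assumption stated elsewhere in this
- // file: a response from the L2 bank that mapAddressToRange() assigns to
- // this address counts as a local hit, while a response from another L1,
- // a different L2 bank, or the directory is treated as external.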
-
- bool okToIssueStarving(Addr addr, MachineID machineID) {
- return persistentTable.okToIssueStarving(addr, machineID);
- }
-
- void markPersistentEntries(Addr addr) {
- persistentTable.markEntries(addr);
- }
-
- void setExternalResponse(TBE tbe) {
- assert(is_valid(tbe));
- tbe.ExternalResponse := true;
- }
-
- bool IsAtomic(TBE tbe) {
- assert(is_valid(tbe));
- return tbe.IsAtomic;
- }
-
- // ** OUT_PORTS **
- out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
- out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
- out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
- out_port(requestRecycle_out, RequestMsg, requestToL1Cache);
-
- // ** IN_PORTS **
-
- // Use Timer
- in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
- if (useTimerTable_in.isReady(clockEdge())) {
- Addr readyAddress := useTimerTable.nextAddress();
- TBE tbe := L1_TBEs.lookup(readyAddress);
-
- if (persistentTable.isLocked(readyAddress) &&
- (persistentTable.findSmallest(readyAddress) != machineID)) {
- if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
- trigger(Event:Use_TimeoutStarverX, readyAddress,
- getCacheEntry(readyAddress), tbe);
- } else {
- trigger(Event:Use_TimeoutStarverS, readyAddress,
- getCacheEntry(readyAddress), tbe);
- }
- } else {
- if (no_mig_atomic && IsAtomic(tbe)) {
- trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
- getCacheEntry(readyAddress), tbe);
- } else {
- trigger(Event:Use_TimeoutNoStarvers, readyAddress,
- getCacheEntry(readyAddress), tbe);
- }
- }
- }
- }
-
- // Reissue Timer
- in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
- Tick current_time := clockEdge();
- if (reissueTimerTable_in.isReady(current_time)) {
- Addr addr := reissueTimerTable.nextAddress();
- trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
- L1_TBEs.lookup(addr));
- }
- }
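-
- // A note on the two timers above: the reissue timer re-triggers a
- // transient request that has not completed (Event:Request_Timeout),
- // while the use timer bounds how long a block obtained in M^W/MM^W may
- // be used before it must be handed over to any starving requestor
- // (the Use_Timeout* events).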
-
- // Persistent Network
- in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
- if (persistentNetwork_in.isReady(clockEdge())) {
- peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
- assert(in_msg.Destination.isElement(machineID));
-
- // Apply the lockdown or unlockdown message to the table
- if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
- } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
- } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
- persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
- } else {
- error("Unexpected message");
- }
-
- // React to the message based on the current state of the table
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs[in_msg.addr];
-
- if (persistentTable.isLocked(in_msg.addr)) {
- if (persistentTable.findSmallest(in_msg.addr) == machineID) {
- // Our Own Lock - this processor is highest priority
- trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- cache_entry, tbe);
- } else {
- if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
- if (getTokens(cache_entry) == 1 ||
- getTokens(cache_entry) == (max_tokens() / 2) + 1) {
- trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
- cache_entry, tbe);
- } else {
- trigger(Event:Persistent_GETS, in_msg.addr,
- cache_entry, tbe);
- }
- } else {
- trigger(Event:Persistent_GETX, in_msg.addr,
- cache_entry, tbe);
- }
- }
- } else {
- // Unlock case - no entries in the table
- trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- cache_entry, tbe);
- }
- }
- }
- }
-
- // Response Network
- in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg, block_on="addr") {
- assert(in_msg.Destination.isElement(machineID));
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs[in_msg.addr];
-
- // Mark TBE flag if response received off-chip. Use this to update average latency estimate
- if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
-
- if (in_msg.Sender == mapAddressToRange(in_msg.addr,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0))) {
-
- // came from an off-chip L2 cache
- if (is_valid(tbe)) {
- // L1_TBEs[in_msg.addr].ExternalResponse := true;
- // profile_offchipL2_response(in_msg.addr);
- }
- }
- else {
- // profile_onchipL2_response(in_msg.addr );
- }
- } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:Directory ) {
- if (is_valid(tbe)) {
- setExternalResponse(tbe);
- // profile_memory_response( in_msg.addr);
- }
- } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
- //if (isLocalProcessor(machineID, in_msg.Sender) == false) {
- //if (is_valid(tbe)) {
- // tbe.ExternalResponse := true;
- // profile_offchipL1_response(in_msg.addr );
- //}
- //}
- //else {
- // profile_onchipL1_response(in_msg.addr );
- //}
- } else {
- error("unexpected SenderMachine");
- }
-
-
- if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
- if (in_msg.Type == CoherenceResponseType:ACK) {
- assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Data_Shared, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected message");
- }
- } else {
- if (in_msg.Type == CoherenceResponseType:ACK) {
- assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
- }
-
- // Request Network
- in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg, block_on="addr") {
- assert(in_msg.Destination.isElement(machineID));
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs[in_msg.addr];
-
- if (in_msg.Type == CoherenceRequestType:GETX) {
- if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETX, in_msg.addr,
- cache_entry, tbe);
- }
- else {
- trigger(Event:Transient_GETX, in_msg.addr,
- cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (getTokens(cache_entry) == 1 ||
- getTokens(cache_entry) == (max_tokens() / 2) + 1) {
- if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETS_Last_Token, in_msg.addr,
- cache_entry, tbe);
- }
- else {
- trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
- cache_entry, tbe);
- }
- }
- else {
- if (in_msg.isLocal) {
- trigger(Event:Transient_Local_GETS, in_msg.addr,
- cache_entry, tbe);
- }
- else {
- trigger(Event:Transient_GETS, in_msg.addr,
- cache_entry, tbe);
- }
- }
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Mandatory Queue
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
- // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
-
- TBE tbe := L1_TBEs[in_msg.LineAddress];
-
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line.
- // We know it can't be in the L2 due to exclusion.
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Icache_entry, tbe);
- } else {
-
- // Check to see if it is in the OTHER L1
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
- // The block is in the wrong L1, try to write it to the L2
- trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Dcache_entry, tbe);
- }
-
- if (L1Icache.cacheAvail(in_msg.LineAddress)) {
- // L1 doesn't have the line, but we have space for it in the L1
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Icache_entry, tbe);
- } else {
- // No room in the L1, so we need to make room
- trigger(Event:L1_Replacement,
- L1Icache.cacheProbe(in_msg.LineAddress),
- getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
- }
- }
- } else {
- // *** DATA ACCESS ***
-
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line.
- // We know it can't be in the L2 due to exclusion.
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Dcache_entry, tbe);
- } else {
-
- // Check to see if it is in the OTHER L1
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The block is in the wrong L1, try to write it to the L2
- trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, tbe);
- }
-
- if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
- // L1 doesn't have the line, but we have space for it in the L1
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Dcache_entry, tbe);
- } else {
- // No room in the L1, so we need to make room
- trigger(Event:L1_Replacement,
- L1Dcache.cacheProbe(in_msg.LineAddress),
- getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
- }
- }
- }
- }
- }
- }
-
- // ACTIONS
-
- action(a_issueReadRequest, "a", desc="Issue GETS") {
- assert(is_valid(tbe));
- if (tbe.IssueCount == 0) {
- // Update outstanding requests
- //profile_outstanding_request(outstandingRequests);
- outstandingRequests := outstandingRequests + 1;
- }
-
- if (tbe.IssueCount >= retry_threshold) {
- // Issue a persistent request if possible
- if (okToIssueStarving(address, machineID) && (starving == false)) {
- enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
-
- //
- // Currently the configuration system limits the system to only one
- // chip. Therefore, if we assume one shared L2 cache, then only one
- // pertinent L2 cache exists.
- //
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := tbe.Prefetch;
- out_msg.AccessMode := tbe.AccessMode;
- }
- markPersistentEntries(address);
- starving := true;
-
- if (tbe.IssueCount == 0) {
- //profile_persistent_prediction(address, tbe.TypeOfAccess);
- }
-
- // Update outstanding requests
- //profile_outstanding_persistent_request(outstandingPersistentRequests);
- outstandingPersistentRequests := outstandingPersistentRequests + 1;
-
- // Increment IssueCount
- tbe.IssueCount := tbe.IssueCount + 1;
-
- tbe.WentPersistent := true;
-
- // Do not schedule a wakeup; a persistent request will always complete
- }
- else {
-
- // We'd like to issue a persistent request, but are not allowed
- // to issue one right now. Thus, we do not increment the
- // IssueCount.
-
- // Set a wakeup timer
- reissueTimerTable.set(
- address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
-
- }
- } else {
- // Make a normal request
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.RetryNum := tbe.IssueCount;
- if (tbe.IssueCount == 0) {
- out_msg.MessageSize := MessageSizeType:Request_Control;
- } else {
- out_msg.MessageSize := MessageSizeType:Reissue_Control;
- }
- out_msg.Prefetch := tbe.Prefetch;
- out_msg.AccessMode := tbe.AccessMode;
- }
-
- // send to other local L1s, with local bit set
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- //
- // Since there is only one chip, assume all L1 caches are local
- //
- //out_msg.Destination := getOtherLocalL1IDs(machineID);
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.remove(machineID);
-
- out_msg.RetryNum := tbe.IssueCount;
- out_msg.isLocal := true;
- // Broadcasts are Broadcast_Control-sized whether first issue or reissue
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.Prefetch := tbe.Prefetch;
- out_msg.AccessMode := tbe.AccessMode;
- }
-
- // Increment IssueCount
- tbe.IssueCount := tbe.IssueCount + 1;
-
- // Set a wakeup timer
-
- if (dynamic_timeout_enabled) {
- reissueTimerTable.set(
- address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
- } else {
- reissueTimerTable.set(
- address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
- }
-
- }
- }
-
- action(b_issueWriteRequest, "b", desc="Issue GETX") {
-
- assert(is_valid(tbe));
- if (tbe.IssueCount == 0) {
- // Update outstanding requests
- //profile_outstanding_request(outstandingRequests);
- outstandingRequests := outstandingRequests + 1;
- }
-
- if (tbe.IssueCount >= retry_threshold) {
- // Issue a persistent request if possible
- if ( okToIssueStarving(address, machineID) && (starving == false)) {
- enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
-
- //
- // Currently the configuration system limits the system to only one
- // chip. Therefore, if we assume one shared L2 cache, then only one
- // pertinent L2 cache exists.
- //
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := tbe.Prefetch;
- out_msg.AccessMode := tbe.AccessMode;
- }
- markPersistentEntries(address);
- starving := true;
-
- // Update outstanding requests
- //profile_outstanding_persistent_request(outstandingPersistentRequests);
- outstandingPersistentRequests := outstandingPersistentRequests + 1;
-
- if (tbe.IssueCount == 0) {
- //profile_persistent_prediction(address, tbe.TypeOfAccess);
- }
-
- // Increment IssueCount
- tbe.IssueCount := tbe.IssueCount + 1;
-
- tbe.WentPersistent := true;
-
- // Do not schedule a wakeup; a persistent request will always complete
- }
- else {
-
- // We'd like to issue a persistent request, but are not allowed
- // to issue one right now. Thus, we do not increment the
- // IssueCount.
-
- // Set a wakeup timer
- reissueTimerTable.set(
- address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
- }
-
- } else {
- // Make a normal request
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.RetryNum := tbe.IssueCount;
-
- if (tbe.IssueCount == 0) {
- out_msg.MessageSize := MessageSizeType:Request_Control;
- } else {
- out_msg.MessageSize := MessageSizeType:Reissue_Control;
- }
- out_msg.Prefetch := tbe.Prefetch;
- out_msg.AccessMode := tbe.AccessMode;
- }
-
- // send to other local L1s too
- enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- out_msg.isLocal := true;
-
- //
- // Since there is only one chip, assume all L1 caches are local
- //
- //out_msg.Destination := getOtherLocalL1IDs(machineID);
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.remove(machineID);
-
- out_msg.RetryNum := tbe.IssueCount;
- // Broadcasts are Broadcast_Control-sized whether first issue or reissue
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.Prefetch := tbe.Prefetch;
- out_msg.AccessMode := tbe.AccessMode;
- }
-
- // Increment IssueCount
- tbe.IssueCount := tbe.IssueCount + 1;
-
- DPRINTF(RubySlicc, "incremented issue count to %d\n",
- tbe.IssueCount);
-
- // Set a wakeup timer
- if (dynamic_timeout_enabled) {
- reissueTimerTable.set(
- address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
- } else {
- reissueTimerTable.set(
- address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
- }
- }
- }
-
- action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- // FIXME, should use a 3rd vnet
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(c_ownedReplacement, "c", desc="Issue writeback") {
- assert(is_valid(cache_entry));
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.Type := CoherenceResponseType:WB_OWNED;
-
- // always send the data?
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- }
- cache_entry.Tokens := 0;
- }
-
- action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
-
- // don't send writeback if replacing block with no tokens
- assert(is_valid(cache_entry));
- assert (cache_entry.Tokens > 0);
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- // assert(cache_entry.Dirty == false);
- out_msg.Dirty := false;
-
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
- }
- cache_entry.Tokens := 0;
- }
-
- action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- // assert(cache_entry.Dirty == false);
- out_msg.Dirty := false;
-
- // always send the data?
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.Type := CoherenceResponseType:WB_TOKENS;
- }
- }
- cache_entry.Tokens := 0;
- }
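-
- // The three replacement actions above cover the cases an evicting L1 can
- // be in: c_ownedReplacement writes back data as the owner,
- // cc_sharedReplacement returns data plus its tokens as a sharer, and
- // tr_tokenReplacement returns just its tokens when no data needs to go
- // back.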
-
-
- action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
- assert(is_valid(cache_entry));
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := 1;
- out_msg.DataBlk := cache_entry.DataBlk;
- // out_msg.Dirty := cache_entry.Dirty;
- out_msg.Dirty := false;
- if (in_msg.isLocal) {
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
- cache_entry.Tokens := cache_entry.Tokens - 1;
- assert(cache_entry.Tokens >= 1);
- }
-
- action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and N tokens, if available, from cache to requestor") {
- assert(is_valid(cache_entry));
- peek(requestNetwork_in, RequestMsg) {
- if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := N_tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- // out_msg.Dirty := cache_entry.Dirty;
- out_msg.Dirty := false;
- if (in_msg.isLocal) {
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- cache_entry.Tokens := cache_entry.Tokens - N_tokens;
- }
- else if (cache_entry.Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := 1;
- out_msg.DataBlk := cache_entry.DataBlk;
- // out_msg.Dirty := cache_entry.Dirty;
- out_msg.Dirty := false;
- if (in_msg.isLocal) {
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- cache_entry.Tokens := cache_entry.Tokens - 1;
- }
- }
-// assert(cache_entry.Tokens >= 1);
- }
-
- action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- assert(cache_entry.Tokens > (max_tokens() / 2));
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- if (in_msg.isLocal) {
- out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
- } else {
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
- // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- if (cache_entry.Tokens > (max_tokens() / 2)) {
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- } else {
- out_msg.Type := CoherenceResponseType:ACK;
- }
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
- //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens > 0);
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(cache_entry.Tokens > (max_tokens() / 2));
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := 0;
- }
-
- action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all our tokens but N (or one) to starver.") {
- //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens > 0);
- if (cache_entry.Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- if (cache_entry.Tokens > (max_tokens() / 2)) {
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- } else {
- out_msg.Type := CoherenceResponseType:ACK;
- }
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(cache_entry.Tokens >= 1);
- if (cache_entry.Tokens > N_tokens) {
- out_msg.Tokens := cache_entry.Tokens - N_tokens;
- } else {
- out_msg.Tokens := cache_entry.Tokens - 1;
- }
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- if (cache_entry.Tokens > N_tokens) {
- cache_entry.Tokens := N_tokens;
- } else {
- cache_entry.Tokens := 1;
- }
- }
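-
- // A sketch of the token split above with hypothetical counts: holding 7
- // tokens with N_tokens = 3 sends 7 - 3 = 4 tokens and keeps 3; holding
- // only 2 sends 2 - 1 = 1 token and keeps exactly 1, so the starver
- // always receives at least one token while this cache retains read
- // permission.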
-
- action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all our tokens but N (or one) to starver") {
- //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
- out_msg.Tokens := cache_entry.Tokens - N_tokens;
- } else {
- out_msg.Tokens := cache_entry.Tokens - 1;
- }
- assert(out_msg.Tokens > (max_tokens() / 2));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
- cache_entry.Tokens := N_tokens;
- } else {
- cache_entry.Tokens := 1;
- }
- }
-
- action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := cache_entry.Tokens;
- assert(out_msg.Tokens > (max_tokens() / 2));
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := 0;
- }
-
- action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
- // assert(persistentTable.isLocked(address));
-
- peek(responseNetwork_in, ResponseMsg) {
- // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- // FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
-
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
-
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- MachineType:L1Cache);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
-
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- MachineType:L1Cache);
- }
-
- action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.readCallback(address, cache_entry.DataBlk,
- isExternalHit(address, in_msg.Sender),
- machineIDToMachineType(in_msg.Sender));
- }
- }
-
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
-
- L1Dcache.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk, false,
- MachineType:L1Cache);
- cache_entry.Dirty := true;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
-
- action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
- address, cache_entry.DataBlk);
- peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.writeCallback(address, cache_entry.DataBlk,
- isExternalHit(address, in_msg.Sender),
- machineIDToMachineType(in_msg.Sender));
- }
- cache_entry.Dirty := true;
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE") {
- check_allocate(L1_TBEs);
- L1_TBEs.allocate(address);
- set_tbe(L1_TBEs[address]);
- tbe.IssueCount := 0;
- peek(mandatoryQueue_in, RubyRequest) {
- tbe.PC := in_msg.ProgramCounter;
- tbe.TypeOfAccess := cache_request_type_to_access_type(in_msg.Type);
- if (in_msg.Type == RubyRequestType:ATOMIC) {
- tbe.IsAtomic := true;
- }
- tbe.Prefetch := in_msg.Prefetch;
- tbe.AccessMode := in_msg.AccessMode;
- }
- tbe.IssueTime := curCycle();
- }
-
- action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
- peek(mandatoryQueue_in, RubyRequest) {
- APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
- }
- }
-
- action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
- if (reissueTimerTable.isSet(address)) {
- reissueTimerTable.unset(address);
- }
- }
-
- action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
- useTimerTable.unset(address);
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue(clockEdge());
- }
-
- action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
- useTimerTable.set(
- address, clockEdge() + cyclesToTicks(use_timeout_latency));
- }
-
- action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:INV;
- out_msg.Tokens := 0;
- out_msg.Sender := machineID;
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
-
- action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- assert(in_msg.Tokens != 0);
- DPRINTF(RubySlicc, "L1 received tokens for address: %#x, tokens: %d\n",
- in_msg.addr, in_msg.Tokens);
- cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
- DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);
-
- if (cache_entry.Dirty == false && in_msg.Dirty) {
- cache_entry.Dirty := true;
- }
- }
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
-
- assert(is_valid(tbe));
- if (tbe.WentPersistent) {
- // assert(starving);
- outstandingRequests := outstandingRequests - 1;
- enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
- out_msg.addr := address;
- out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
-
- //
- // Currently the configuration system limits the system to only one
- // chip. Therefore, if we assume one shared L2 cache, then only one
- // pertinent L2 cache exists.
- //
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Persistent_Control;
- }
- starving := false;
- }
-
- // Update average latency
- if (tbe.IssueCount <= 1) {
- if (tbe.ExternalResponse) {
- updateAverageLatencyEstimate(curCycle() - tbe.IssueTime);
- }
- }
-
- // Profile
- //if (tbe.WentPersistent) {
- // profile_token_retry(address, tbe.TypeOfAccess, 2);
- //}
- //else {
- // profile_token_retry(address, tbe.TypeOfAccess, 1);
- //}
-
- //profile_token_retry(address, tbe.TypeOfAccess, tbe.IssueCount);
- L1_TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
- out_msg.addr := address;
- if (cache_entry.Tokens > (max_tokens() / 2)) {
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- } else {
- out_msg.Type := CoherenceResponseType:ACK;
- }
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(u_writeDataToCache, "u", desc="Write data to cache") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- if (cache_entry.Dirty == false && in_msg.Dirty) {
- cache_entry.Dirty := in_msg.Dirty;
- }
-
- }
- }
-
- action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
- assert(getTokens(cache_entry) == 0);
- if (L1Dcache.isTagPresent(address)) {
- L1Dcache.deallocate(address);
- } else {
- L1Icache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (is_valid(cache_entry) == false) {
- set_cache_entry(L1Dcache.allocate(address, new Entry));
- }
- }
-
- action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (is_valid(cache_entry) == false) {
- set_cache_entry(L1Icache.allocate(address, new Entry));
- }
- }
-
- action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
- ++L1Icache.demand_misses;
- }
-
- action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
- ++L1Icache.demand_hits;
- }
-
- action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
- ++L1Dcache.demand_misses;
- }
-
- action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
- ++L1Dcache.demand_hits;
- }
-
- action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- assert(cache_entry.DataBlk == in_msg.DataBlk);
- }
- }
-
- action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- peek(mandatoryQueue_in, RubyRequest) {
- APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
- }
- stall_and_wait(mandatoryQueue_in, address);
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpBuffers(address);
- }
-
- action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // Transitions for Load/Store/L1_Replacement from transient states
- transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
- ta_traceStalledAddress;
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, {Store, Atomic}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- // Lockdowns
- transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
- l_popPersistentQueue;
- }
-
- // Transitions from NP
- transition(NP, Load, IS) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- a_issueReadRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(NP, Ifetch, IS) {
- pp_allocateL1ICacheBlock;
- i_allocateTBE;
- a_issueReadRequest;
- uu_profileInstMiss;
- k_popMandatoryQueue;
- }
-
- transition(NP, {Store, Atomic}, IM) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- b_issueWriteRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
- bb_bounceResponse;
- n_popResponseQueue;
- }
-
- transition(NP, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) {
- m_popRequestQueue;
- }
-
- transition(NP, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
- l_popPersistentQueue;
- }
-
- // Transitions from Idle
- transition(I, Load, IS) {
- i_allocateTBE;
- a_issueReadRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, Ifetch, IS) {
- i_allocateTBE;
- a_issueReadRequest;
- uu_profileInstMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, {Store, Atomic}, IM) {
- i_allocateTBE;
- b_issueWriteRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(I, L1_Replacement) {
- ta_traceStalledAddress;
- tr_tokenReplacement;
- gg_deallocateL1CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(I, {Transient_GETX, Transient_Local_GETX}) {
- t_sendAckWithCollectedTokens;
- m_popRequestQueue;
- }
-
- transition(I, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
- m_popRequestQueue;
- }
-
- transition(I, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
- e_sendAckWithCollectedTokens;
- l_popPersistentQueue;
- }
-
- transition(I_L, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}) {
- l_popPersistentQueue;
- }
-
- transition(I, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(I, Data_Shared, S) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(I, Data_Owner, O) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(I, Data_All_Tokens, M) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- // Transitions from Shared
- transition({S, SM, S_L, SM_L}, Load) {
- h_load_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition({S, SM, S_L, SM_L}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- k_popMandatoryQueue;
- }
-
- transition(S, {Store, Atomic}, SM) {
- i_allocateTBE;
- b_issueWriteRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(S, L1_Replacement, I) {
- ta_traceStalledAddress;
- cc_sharedReplacement; // Only needed in some cases
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
- t_sendAckWithCollectedTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- m_popRequestQueue;
- }
-
- // only owner responds to non-local requests
- transition(S, Transient_GETS) {
- m_popRequestQueue;
- }
-
- transition(S, Transient_Local_GETS) {
- d_sendDataWithToken;
- m_popRequestQueue;
- }
-
- transition(S, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
- m_popRequestQueue;
- }
-
- transition({S, S_L}, Persistent_GETX, I_L) {
- e_sendAckWithCollectedTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
- f_sendAckWithAllButNorOneTokens;
- l_popPersistentQueue;
- }
-
- transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
- l_popPersistentQueue;
- }
-
- transition(S, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Data_Shared) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Data_Owner, O) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Data_All_Tokens, M) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- // Transitions from Owned
- transition({O, OM}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- k_popMandatoryQueue;
- }
-
- transition({O, OM}, Load) {
- h_load_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(O, {Store, Atomic}, OM) {
- i_allocateTBE;
- b_issueWriteRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(O, L1_Replacement, I) {
- ta_traceStalledAddress;
- c_ownedReplacement;
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
- dd_sendDataWithAllTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- m_popRequestQueue;
- }
-
- transition(O, Persistent_GETX, I_L) {
- ee_sendDataWithAllTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- transition(O, Persistent_GETS, S_L) {
- ff_sendDataWithAllButNorOneTokens;
- l_popPersistentQueue;
- }
-
- transition(O, Persistent_GETS_Last_Token, I_L) {
- fo_sendDataWithOwnerToken;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- transition(O, Transient_GETS) {
- d_sendDataWithToken;
- m_popRequestQueue;
- }
-
- transition(O, Transient_Local_GETS) {
- d_sendDataWithToken;
- m_popRequestQueue;
- }
-
- // ran out of tokens, wait for it to go persistent
- transition(O, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
- m_popRequestQueue;
- }
-
- transition(O, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(O, Ack_All_Tokens, M) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(O, Data_Shared) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(O, Data_All_Tokens, M) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- // Transitions from Modified
- transition({MM, MM_W}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- k_popMandatoryQueue;
- }
-
- transition({MM, MM_W}, Load) {
- h_load_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition({MM_W}, {Store, Atomic}) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(MM, Store) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(MM, Atomic, M) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(MM, L1_Replacement, I) {
- ta_traceStalledAddress;
- c_ownedReplacement;
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
- dd_sendDataWithAllTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- m_popRequestQueue;
- }
-
- transition({MM_W}, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
- m_popRequestQueue;
- }
-
- // Implement the migratory sharing optimization, even for persistent requests
- transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
- ee_sendDataWithAllTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- // ignore persistent requests in lockout period
- transition(MM_W, {Persistent_GETX, Persistent_GETS}) {
- l_popPersistentQueue;
- }
-
- transition(MM_W, Use_TimeoutNoStarvers, MM) {
- s_deallocateTBE;
- jj_unsetUseTimer;
- kd_wakeUpDependents;
- }
-
- transition(MM_W, Use_TimeoutNoStarvers_NoMig, M) {
- s_deallocateTBE;
- jj_unsetUseTimer;
- kd_wakeUpDependents;
- }
-
- // Transitions from Dirty Exclusive
- transition({M, M_W}, Ifetch) {
- h_ifetch_hit;
- uu_profileInstHit;
- k_popMandatoryQueue;
- }
-
- transition({M, M_W}, Load) {
- h_load_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M, Store, MM) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M, Atomic) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M_W, Store, MM_W) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M_W, Atomic) {
- hh_store_hit;
- uu_profileDataHit;
- k_popMandatoryQueue;
- }
-
- transition(M, L1_Replacement, I) {
- ta_traceStalledAddress;
- c_ownedReplacement;
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
- dd_sendDataWithAllTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- m_popRequestQueue;
- }
-
- transition(M, Transient_Local_GETS, O) {
- d_sendDataWithToken;
- m_popRequestQueue;
- }
-
- transition(M, Transient_GETS, O) {
- d_sendDataWithNTokenIfAvail;
- m_popRequestQueue;
- }
-
- transition(M_W, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
- m_popRequestQueue;
- }
-
- transition(M, Persistent_GETX, I_L) {
- ee_sendDataWithAllTokens;
- p_informL2AboutTokenLoss;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- transition(M, Persistent_GETS, S_L) {
- ff_sendDataWithAllButNorOneTokens;
- l_popPersistentQueue;
- }
-
- // ignore persistent requests in lockout period
- transition(M_W, {Persistent_GETX, Persistent_GETS}) {
- l_popPersistentQueue;
- }
-
- transition(M_W, Use_TimeoutStarverS, S_L) {
- s_deallocateTBE;
- ff_sendDataWithAllButNorOneTokens;
- jj_unsetUseTimer;
- }
-
- // someone unlocked during timeout
- transition(M_W, {Use_TimeoutNoStarvers, Use_TimeoutNoStarvers_NoMig}, M) {
- s_deallocateTBE;
- jj_unsetUseTimer;
- kd_wakeUpDependents;
- }
-
- transition(M_W, Use_TimeoutStarverX, I_L) {
- s_deallocateTBE;
- ee_sendDataWithAllTokens;
- forward_eviction_to_cpu;
- p_informL2AboutTokenLoss;
- jj_unsetUseTimer;
- }
-
- // migratory
- transition(MM_W, {Use_TimeoutStarverX, Use_TimeoutStarverS}, I_L) {
- s_deallocateTBE;
- ee_sendDataWithAllTokens;
- forward_eviction_to_cpu;
- p_informL2AboutTokenLoss;
- jj_unsetUseTimer;
- }
-
- // Transient_GETX and Transient_GETS in transient states
- transition(OM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
- m_popRequestQueue; // Even if we have the data, we can pretend we don't have it yet.
- }
-
- transition(IS, {Transient_GETX, Transient_Local_GETX}) {
- t_sendAckWithCollectedTokens;
- m_popRequestQueue;
- }
-
- transition(IS, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
- m_popRequestQueue;
- }
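-
- // A persistent request must be honored even while a transient request
- // is outstanding: give up any collected tokens (or data) and move to
- // the locked variant of the current transient state.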
-
- transition(IS, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IS_L) {
- e_sendAckWithCollectedTokens;
- l_popPersistentQueue;
- }
-
- transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
- l_popPersistentQueue;
- }
-
- transition(IM, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IM_L) {
- e_sendAckWithCollectedTokens;
- l_popPersistentQueue;
- }
-
- transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
- l_popPersistentQueue;
- }
-
- transition({SM, SM_L}, Persistent_GETX, IM_L) {
- e_sendAckWithCollectedTokens;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- transition(SM, {Persistent_GETS, Persistent_GETS_Last_Token}, SM_L) {
- f_sendAckWithAllButNorOneTokens;
- l_popPersistentQueue;
- }
-
- transition(SM_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
- l_popPersistentQueue;
- }
-
- transition(OM, Persistent_GETX, IM_L) {
- ee_sendDataWithAllTokens;
- forward_eviction_to_cpu;
- l_popPersistentQueue;
- }
-
- transition(OM, Persistent_GETS, SM_L) {
- ff_sendDataWithAllButNorOneTokens;
- l_popPersistentQueue;
- }
-
- transition(OM, Persistent_GETS_Last_Token, IM_L) {
- fo_sendDataWithOwnerToken;
- l_popPersistentQueue;
- }
-
- // Transitions from IM/SM
-
- transition({IM, SM}, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(IM, Data_Shared, SM) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(IM, Data_Owner, OM) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(IM, Data_All_Tokens, MM_W) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- xx_external_store_hit;
- o_scheduleUseTimeout;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
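-
- // Collecting all tokens completes the store immediately; MM_W then
- // holds the line for the use-timeout window (o_scheduleUseTimeout) so
- // the processor can use the data before the tokens are taken away.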
-
- transition(SM, Data_Shared) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(SM, Data_Owner, OM) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(SM, Data_All_Tokens, MM_W) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- xx_external_store_hit;
- o_scheduleUseTimeout;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) { // We don't have the data yet, but we might have collected some tokens. We give them up here to avoid livelock
- t_sendAckWithCollectedTokens;
- forward_eviction_to_cpu;
- m_popRequestQueue;
- }
-
- transition({IM, SM}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
- m_popRequestQueue;
- }
-
- transition({IM, SM}, Request_Timeout) {
- j_unsetReissueTimer;
- b_issueWriteRequest;
- }
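-
- // A timed-out transient write is simply reissued; any escalation to a
- // persistent request is handled inside the issue action itself.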
-
- // Transitions from OM
-
- transition(OM, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(OM, Ack_All_Tokens, MM_W) {
- q_updateTokensFromResponse;
- xx_external_store_hit;
- o_scheduleUseTimeout;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(OM, Data_Shared) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(OM, Data_All_Tokens, MM_W) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- xx_external_store_hit;
- o_scheduleUseTimeout;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(OM, Request_Timeout) {
- j_unsetReissueTimer;
- b_issueWriteRequest;
- }
-
- // Transitions from IS
-
- transition(IS, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(IS, Data_Shared, S) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- x_external_load_hit;
- s_deallocateTBE;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Data_Owner, O) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- x_external_load_hit;
- s_deallocateTBE;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Data_All_Tokens, M_W) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- x_external_load_hit;
- o_scheduleUseTimeout;
- j_unsetReissueTimer;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Request_Timeout) {
- j_unsetReissueTimer;
- a_issueReadRequest;
- }
-
- // Transitions from I_L
-
- transition(I_L, Load, IS_L) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- a_issueReadRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- transition(I_L, Ifetch, IS_L) {
- pp_allocateL1ICacheBlock;
- i_allocateTBE;
- a_issueReadRequest;
- uu_profileInstMiss;
- k_popMandatoryQueue;
- }
-
- transition(I_L, {Store, Atomic}, IM_L) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- b_issueWriteRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
-
- // Transitions from S_L
-
- transition(S_L, {Store, Atomic}, SM_L) {
- i_allocateTBE;
- b_issueWriteRequest;
- uu_profileDataMiss;
- k_popMandatoryQueue;
- }
-
- // Other transitions from *_L states
-
- transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS, Transient_GETX, Transient_Local_GETX}) {
- m_popRequestQueue;
- }
-
- transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
- g_bounceResponseToStarver;
- n_popResponseQueue;
- }
-
- transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
- g_bounceResponseToStarver;
- n_popResponseQueue;
- }
-
- transition({I_L, S_L}, Data_All_Tokens) {
- g_bounceResponseToStarver;
- n_popResponseQueue;
- }
-
- transition(IS_L, Request_Timeout) {
- j_unsetReissueTimer;
- a_issueReadRequest;
- }
-
- transition({IM_L, SM_L}, Request_Timeout) {
- j_unsetReissueTimer;
- b_issueWriteRequest;
- }
-
- // Opportunistically complete the memory operation in the following
- // cases. Note: these transitions could just use
- // g_bounceResponseToStarver, but if we have the data and tokens, we
- // might as well complete the memory request while we have the
- // chance (and then immediately forward on the data)
-
- transition(IM_L, Data_All_Tokens, MM_W) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- xx_external_store_hit;
- j_unsetReissueTimer;
- o_scheduleUseTimeout;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(SM_L, Data_All_Tokens, S_L) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- xx_external_store_hit;
- ff_sendDataWithAllButNorOneTokens;
- s_deallocateTBE;
- j_unsetReissueTimer;
- n_popResponseQueue;
- }
-
- transition(IS_L, Data_Shared, I_L) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- x_external_load_hit;
- s_deallocateTBE;
- e_sendAckWithCollectedTokens;
- p_informL2AboutTokenLoss;
- j_unsetReissueTimer;
- n_popResponseQueue;
- }
-
- transition(IS_L, Data_Owner, I_L) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- x_external_load_hit;
- ee_sendDataWithAllTokens;
- s_deallocateTBE;
- p_informL2AboutTokenLoss;
- j_unsetReissueTimer;
- n_popResponseQueue;
- }
-
- transition(IS_L, Data_All_Tokens, M_W) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- x_external_load_hit;
- j_unsetReissueTimer;
- o_scheduleUseTimeout;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- // Own_Lock_or_Unlock
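- // Our own persistent request has reached the head of the persistent
- // table: each locked state unlocks back to its unlocked counterpart.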
-
- transition(I_L, Own_Lock_or_Unlock, I) {
- l_popPersistentQueue;
- kd_wakeUpDependents;
- }
-
- transition(S_L, Own_Lock_or_Unlock, S) {
- l_popPersistentQueue;
- kd_wakeUpDependents;
- }
-
- transition(IM_L, Own_Lock_or_Unlock, IM) {
- l_popPersistentQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS_L, Own_Lock_or_Unlock, IS) {
- l_popPersistentQueue;
- kd_wakeUpDependents;
- }
-
- transition(SM_L, Own_Lock_or_Unlock, SM) {
- l_popPersistentQueue;
- kd_wakeUpDependents;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:L2Cache, "Token protocol")
- : CacheMemory * L2cache;
- int N_tokens;
- Cycles l2_request_latency := 5;
- Cycles l2_response_latency := 5;
- bool filtering_enabled := "True";
-
- // L2 BANK QUEUES
- // From local bank of L2 cache TO the network
-
- // this L2 bank -> a local L1 || mod-directory
- MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
- vnet_type="response";
- // this L2 bank -> mod-directory
- MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
- vnet_type="request";
- // this L2 bank -> a local L1
- MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
- vnet_type="request";
-
-
- // FROM the network to this local bank of L2 cache
-
- // a local L1 || mod-directory -> this L2 bank
- MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
- vnet_type="response";
- MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
- vnet_type="persistent";
- // mod-directory -> this L2 bank
- MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
- vnet_type="request";
- // a local L1 -> this L2 bank
- MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
- vnet_type="request";
-
-{
- // STATES
- state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
- // Base states
- NP, AccessPermission:Invalid, desc="Not Present";
- I, AccessPermission:Invalid, desc="Idle";
- S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
- O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
- M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
-
- // Locked states
- I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
- S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
-
- // Requests
- L1_GETS, desc="local L1 GETS request";
- L1_GETS_Last_Token, desc="local L1 GETS request for the last token";
- L1_GETX, desc="local L1 GETX request";
- L1_INV, desc="L1 no longer has tokens";
- Transient_GETX, desc="A GetX from another processor";
- Transient_GETS, desc="A GetS from another processor";
- Transient_GETS_Last_Token, desc="A GetS from another processor for the last token";
-
- // events initiated by this L2
- L2_Replacement, desc="L2 Replacement", format="!r";
-
- // events of external L2 responses
-
- // Responses
- Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
- Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
- Writeback_All_Tokens, desc="Received a writeback from L1 with all tokens";
- Writeback_Owned, desc="Received a writeback of owned data from L1";
-
-
- Data_Shared, desc="Received a data message, we are now a sharer";
- Data_Owner, desc="Received a data message, we are now the owner";
- Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
- Ack, desc="Received an ack message";
- Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
-
- // Lock/Unlock
- Persistent_GETX, desc="Another processor has priority to read/write";
- Persistent_GETS, desc="Another processor has priority to read";
- Persistent_GETS_Last_Token, desc="Another processor has priority to read; we hold the last token";
- Own_Lock_or_Unlock, desc="This processor now has priority";
- }
-
- // TYPES
-
- // CacheEntry
- structure(Entry, desc="...", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int Tokens, desc="The number of tokens we're holding for the line";
- DataBlock DataBlk, desc="data for the block";
- }
-
- structure(DirEntry, desc="...", interface="AbstractEntry") {
- Set Sharers, desc="Set of the internal processors that want the block in shared state";
- bool exclusive, default="false", desc="if local exclusive is likely";
- }
-
- structure(PerfectCacheMemory, external="yes") {
- void allocate(Addr);
- void deallocate(Addr);
- DirEntry lookup(Addr);
- bool isTagPresent(Addr);
- }
-
- structure(PersistentTable, external="yes") {
- void persistentRequestLock(Addr, MachineID, AccessType);
- void persistentRequestUnlock(Addr, MachineID);
- MachineID findSmallest(Addr);
- AccessType typeOfSmallest(Addr);
- void markEntries(Addr);
- bool isLocked(Addr);
- int countStarvingForAddress(Addr);
- int countReadStarvingForAddress(Addr);
- }
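-
- // The persistent table tracks outstanding persistent (starvation
- // avoidance) requests; findSmallest() picks the starving machine with
- // the smallest ID as the current winner.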
-
- PersistentTable persistentTable;
- PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
-
- Tick clockEdge();
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
- return cache_entry;
- }
-
- DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
- return localDirectory.lookup(address);
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
- return num_functional_writes;
- }
-
- int getTokens(Entry cache_entry) {
- if (is_valid(cache_entry)) {
- return cache_entry.Tokens;
- } else {
- return 0;
- }
- }
-
- State getState(Entry cache_entry, Addr addr) {
- if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- } else if (persistentTable.isLocked(addr)) {
- return State:I_L;
- } else {
- return State:NP;
- }
- }
-
- void setState(Entry cache_entry, Addr addr, State state) {
-
- if (is_valid(cache_entry)) {
- // Make sure the token count is in range
- assert(cache_entry.Tokens >= 0);
- assert(cache_entry.Tokens <= max_tokens());
- assert(cache_entry.Tokens != (max_tokens() / 2));
-
- // Make sure we have no tokens in L
- if ((state == State:I_L) ) {
- assert(cache_entry.Tokens == 0);
- }
-
- // in M you have all the tokens
- if (state == State:M ) {
- assert(cache_entry.Tokens == max_tokens());
- }
-
- // in NP you have no tokens
- if (state == State:NP) {
- assert(cache_entry.Tokens == 0);
- }
-
- // You have at least one token in S-like states
- if (state == State:S ) {
- assert(cache_entry.Tokens > 0);
- }
-
- // You have at least half the tokens in O-like states
- if (state == State:O ) {
- assert(cache_entry.Tokens > (max_tokens() / 2));
- }
-
- cache_entry.CacheState := state;
- }
- }
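-
- // Token-count encoding, inferred from the asserts above: the owner
- // token appears to be worth (max_tokens() / 2) + 1 plain tokens, so a
- // valid count never equals exactly max_tokens() / 2, and the "last
- // token" checks below test for 1 (a lone plain token) or
- // (max_tokens() / 2) + 1 (the owner token alone).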
-
- AccessPermission getAccessPermission(Addr addr) {
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return L2Cache_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L2Cache_State_to_permission(state));
- }
- }
-
- void removeSharer(Addr addr, NodeID id) {
-
- if (localDirectory.isTagPresent(addr)) {
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.remove(id);
- if (dir_entry.Sharers.count() == 0) {
- localDirectory.deallocate(addr);
- }
- }
- }
-
- bool sharersExist(Addr addr) {
- if (localDirectory.isTagPresent(addr)) {
- DirEntry dir_entry := getDirEntry(addr);
- if (dir_entry.Sharers.count() > 0) {
- return true;
- }
- else {
- return false;
- }
- }
- else {
- return false;
- }
- }
-
- bool exclusiveExists(Addr addr) {
- if (localDirectory.isTagPresent(addr)) {
- DirEntry dir_entry := getDirEntry(addr);
- if (dir_entry.exclusive) {
- return true;
- }
- else {
- return false;
- }
- }
- else {
- return false;
- }
- }
-
- // assumes that caller will check to make sure tag is present
- Set getSharers(Addr addr) {
- DirEntry dir_entry := getDirEntry(addr);
- return dir_entry.Sharers;
- }
-
- void setNewWriter(Addr addr, NodeID id) {
- if (localDirectory.isTagPresent(addr) == false) {
- localDirectory.allocate(addr);
- }
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.clear();
- dir_entry.Sharers.add(id);
- dir_entry.exclusive := true;
- }
-
- void addNewSharer(Addr addr, NodeID id) {
- if (localDirectory.isTagPresent(addr) == false) {
- localDirectory.allocate(addr);
- }
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.Sharers.add(id);
- // dir_entry.exclusive := false;
- }
-
- void clearExclusiveBitIfExists(Addr addr) {
- if (localDirectory.isTagPresent(addr)) {
- DirEntry dir_entry := getDirEntry(addr);
- dir_entry.exclusive := false;
- }
- }
-
- // ** OUT_PORTS **
- out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
- out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
- out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
-
-
-
- // ** IN_PORTS **
-
- // Persistent Network
- in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
- if (persistentNetwork_in.isReady(clockEdge())) {
- peek(persistentNetwork_in, PersistentMsg) {
- assert(in_msg.Destination.isElement(machineID));
-
- if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
- } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
- } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
- persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
- } else {
- error("Unexpected message");
- }
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- // React to the message based on the current state of the table
- if (persistentTable.isLocked(in_msg.addr)) {
-
- if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
- if (getTokens(cache_entry) == 1 ||
- getTokens(cache_entry) == (max_tokens() / 2) + 1) {
- trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
- cache_entry);
- } else {
- trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
- }
- } else {
- trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
- }
- }
- else {
- trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
- }
- }
- }
- }
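-
- // A locked address raises a Persistent_* event matching the current
- // winner's access type; otherwise the message concerned our own lock
- // or unlock, and Own_Lock_or_Unlock fires instead.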
-
-
- // Request Network
- in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (getTokens(cache_entry) == 1) {
- trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
- cache_entry);
- }
- else {
- trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
- }
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady(clockEdge())) {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.addr);
- if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:L1_GETX, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (getTokens(cache_entry) == 1 ||
- getTokens(cache_entry) == (max_tokens() / 2) + 1) {
- trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
- }
- else {
- trigger(Event:L1_GETS, in_msg.addr, cache_entry);
- }
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
-
- // Response Network
- in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- Entry cache_entry := getCacheEntry(in_msg.addr);
-
- if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
- if (in_msg.Type == CoherenceResponseType:ACK) {
- assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_Shared, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
- in_msg.Type == CoherenceResponseType:WB_OWNED ||
- in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
-
- if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
-
- // either room is available or the block is already present
-
- if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
- assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
- }
- else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
- //assert(in_msg.Dirty == false);
- trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
- }
- }
- else {
- trigger(Event:L2_Replacement,
- L2cache.cacheProbe(in_msg.addr),
- getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
- }
- } else if (in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:L1_INV, in_msg.addr, cache_entry);
- } else {
- error("Unexpected message");
- }
- } else {
- if (in_msg.Type == CoherenceResponseType:ACK) {
- assert(in_msg.Tokens < (max_tokens() / 2));
- trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
- in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
- in_msg.Type == CoherenceResponseType:WB_OWNED ||
- in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
-
- // either room is available or the block is already present
-
- if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
- assert(in_msg.Dirty == false);
- assert( (getState(cache_entry, in_msg.addr) != State:NP)
- && (getState(cache_entry, in_msg.addr) != State:I) );
- trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
- } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- assert(in_msg.Dirty == false);
- trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
- }
- else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
- trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
- }
- }
- else {
- trigger(Event:L2_Replacement,
- L2cache.cacheProbe(in_msg.addr),
- getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
- }
- } else if (in_msg.Type == CoherenceResponseType:INV) {
- trigger(Event:L1_INV, in_msg.addr, cache_entry);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Unexpected message");
- }
- }
- }
- }
- }
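-
- // Every response is first classified by whether its tokens complete
- // the full set (getTokens(entry) + in_msg.Tokens == max_tokens());
- // only that completing branch raises the *_All_Tokens events.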
-
-
- // ACTIONS
-
- action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
-
- peek(L1requestNetwork_in, RequestMsg) {
-
- // if this is a retry or no local sharers, broadcast normally
- enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
- out_msg.addr := in_msg.addr;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.RetryNum := in_msg.RetryNum;
-
- //
- // If a statically shared L2 cache, then no other L2 caches can
- // store the block
- //
- //out_msg.Destination.broadcast(MachineType:L2Cache);
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
- //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.AccessMode := in_msg.AccessMode;
- out_msg.Prefetch := in_msg.Prefetch;
- } //enqueue
- // } // if
-
- //profile_filter_action(0);
- } // peek
- } //action
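-
- // With a single directory-backed broadcast domain there is no need to
- // broadcast to peer L2 banks (see the commented-out destination code
- // above); the local request is forwarded straight to the directory.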
-
-
- action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- // FIXME, should use a 3rd vnet
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(c_cleanReplacement, "c", desc="Issue clean writeback") {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- cache_entry.Tokens := 0;
- }
- }
-
- action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
- assert(is_valid(cache_entry));
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
-
- if (cache_entry.Dirty) {
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- } else {
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- out_msg.Type := CoherenceResponseType:ACK_OWNER;
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(d_sendDataWithTokens, "d", desc="Send data and one or more tokens from cache to requestor") {
- peek(requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := N_tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := cache_entry.Tokens - N_tokens;
- }
- else {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := 1;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := cache_entry.Tokens - 1;
- }
- }
- }
-
- action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
- assert(is_valid(cache_entry));
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
- assert(is_valid(cache_entry));
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := 0;
- }
-
- action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
- //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens > 0);
- if (cache_entry.Tokens > 1) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens - 1;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- cache_entry.Tokens := 1;
- }
-
- action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
- //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := cache_entry.Tokens - 1;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := 1;
- }
-
- action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
- //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- cache_entry.Tokens := 0;
- }
-
-
-
- action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
- // assert(persistentTable.isLocked(address));
- peek(responseNetwork_in, ResponseMsg) {
- // FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
-
- action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
- //assert(persistentTable.isLocked(address));
- peek(responseNetwork_in, ResponseMsg) {
- // FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- } else {
- assert(in_msg.Tokens < (max_tokens() / 2));
- out_msg.Type := CoherenceResponseType:ACK;
- }
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
-
- action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
- // assert(persistentTable.isLocked(address));
- peek(responseNetwork_in, ResponseMsg) {
- // FIXME, should use a 3rd vnet in some cases
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.MessageSize := in_msg.MessageSize;
- }
- }
- }
-
-
- action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
- peek(responseNetwork_in, ResponseMsg) {
- removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
- }
- }
-
- action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
- peek(requestNetwork_in, RequestMsg) {
- if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
- //profile_filter_action(1);
- DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
- in_msg.RetryNum);
- }
- else {
- enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
- out_msg.addr := in_msg.addr;
- out_msg.Requestor := in_msg.Requestor;
-
- //
- // Currently assuming only one chip so all L1s are local
- //
- //out_msg.Destination := getLocalL1IDs(machineID);
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.remove(in_msg.Requestor);
-
- out_msg.Type := in_msg.Type;
- out_msg.isLocal := false;
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.AccessMode := in_msg.AccessMode;
- out_msg.Prefetch := in_msg.Prefetch;
- }
- //profile_filter_action(0);
- }
- }
- }
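-
- // Filtering: a first-attempt external request (RetryNum == 0) for a
- // line with no registered local sharers is dropped; retries are always
- // forwarded, since the sharer filter is only soft state and may be
- // stale.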
-
- action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens > 0);
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- out_msg.Tokens := 1;
- }
- cache_entry.Tokens := cache_entry.Tokens - 1;
- }
- }
-
- action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
- assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- out_msg.Tokens := cache_entry.Tokens;
- }
- cache_entry.Tokens := 0;
- }
- }
-
- action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
- peek(L1requestNetwork_in, RequestMsg) {
- assert(is_valid(cache_entry));
-// assert(cache_entry.Tokens == max_tokens());
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
- //out_msg.Tokens := max_tokens();
- out_msg.Tokens := cache_entry.Tokens;
- }
- cache_entry.Tokens := 0;
- }
- }
-
- action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue(clockEdge());
- }
-
- action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue(clockEdge());
- }
-
-
- action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- assert(in_msg.Tokens != 0);
- cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
-
- // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
- // may not trigger this action.
- if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
- cache_entry.Dirty := true;
- }
- }
- }
-
- action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
- peek(L1requestNetwork_in, RequestMsg) {
- if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
- if (in_msg.Type == CoherenceRequestType:GETX) {
- setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
- }
- }
- }
- }
-
- action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
- clearExclusiveBitIfExists(address);
- }
-
- action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
- peek(L1requestNetwork_in, RequestMsg) {
- if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
- (is_valid(cache_entry))) {
- L2cache.setMRU(address);
- }
- }
- }
-
- action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
- assert(is_valid(cache_entry));
- if (cache_entry.Tokens > 0) {
- peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- assert(cache_entry.Tokens >= 1);
- out_msg.Tokens := cache_entry.Tokens;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
- cache_entry.Tokens := 0;
- }
-
- action(u_writeDataToCache, "u", desc="Write data to cache") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- if ((cache_entry.Dirty == false) && in_msg.Dirty) {
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(vv_allocateL2CacheBlock, "\v", desc="Allocate an L2 cache block for this address and set its tag.") {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
-
- action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cache.deallocate(address);
- unset_cache_entry();
- }
-
- action(uu_profileMiss, "\um", desc="Profile the demand miss") {
- ++L2cache.demand_misses;
- }
-
- action(uu_profileHit, "\uh", desc="Profile the demand hit") {
- ++L2cache.demand_hits;
- }
-
- action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
- peek(responseNetwork_in, ResponseMsg) {
- if (in_msg.Type != CoherenceResponseType:ACK &&
- in_msg.Type != CoherenceResponseType:WB_TOKENS) {
- assert(is_valid(cache_entry));
- assert(cache_entry.DataBlk == in_msg.DataBlk);
- }
- }
- }
-
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
- l_popPersistentQueue;
- }
-
-
- // Transitions from NP
-
- transition(NP, {Transient_GETX, Transient_GETS}) {
- // forward message to local sharers
- r_clearExclusive;
- j_forwardTransientRequestToLocalSharers;
- m_popRequestQueue;
- }
-
-
- transition(NP, {L1_GETS, L1_GETX}) {
- a_broadcastLocalRequest;
- r_markNewSharer;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
- bb_bounceResponse;
- n_popResponseQueue;
- }
-
- transition(NP, Writeback_Shared_Data, S) {
- vv_allocateL2CacheBlock;
- u_writeDataToCache;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(NP, Writeback_Tokens, I) {
- vv_allocateL2CacheBlock;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(NP, Writeback_All_Tokens, M) {
- vv_allocateL2CacheBlock;
- u_writeDataToCache;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(NP, Writeback_Owned, O) {
- vv_allocateL2CacheBlock;
- u_writeDataToCache;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
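-
- // A writeback arriving in NP allocates a fresh L2 block; the resulting
- // state mirrors what the L1 gave up: tokens only -> I, shared data ->
- // S, owned data -> O, and data with all tokens -> M.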
-
-
- transition(NP,
- {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
- I_L) {
- l_popPersistentQueue;
- }
-
- // Transitions from Idle
-
- transition(I, {L1_GETS, L1_GETS_Last_Token}) {
- a_broadcastLocalRequest;
- tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
- r_markNewSharer;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(I, L1_GETX) {
- a_broadcastLocalRequest;
- tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
- r_markNewSharer;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(I, L2_Replacement) {
- c_cleanReplacement; // Only needed in some cases
- rr_deallocateL2CacheBlock;
- }
-
- transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
- r_clearExclusive;
- t_sendAckWithCollectedTokens;
- j_forwardTransientRequestToLocalSharers;
- m_popRequestQueue;
- }
-
- transition(I,
- {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
- I_L) {
- e_sendAckWithCollectedTokens;
- l_popPersistentQueue;
- }
-
-
- transition(I, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(I, Data_Shared, S) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(I, Writeback_Shared_Data, S) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(I, Writeback_Tokens) {
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(I, Data_Owner, O) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(I, Writeback_Owned, O) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(I, Data_All_Tokens, M) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
-
- transition(I, Writeback_All_Tokens, M) {
- u_writeDataToCache;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- // Transitions from Shared
-
- transition(S, L2_Replacement, I) {
- c_cleanReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(S, Transient_GETX, I) {
- r_clearExclusive;
- t_sendAckWithCollectedTokens;
- j_forwardTransientRequestToLocalSharers;
- m_popRequestQueue;
- }
-
- transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
- j_forwardTransientRequestToLocalSharers;
- r_clearExclusive;
- m_popRequestQueue;
- }
-
- transition(S, Persistent_GETX, I_L) {
- e_sendAckWithCollectedTokens;
- l_popPersistentQueue;
- }
-
-
- transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
- f_sendAckWithAllButOneTokens;
- l_popPersistentQueue;
- }
-
-
- transition(S, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Data_Shared) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Writeback_Tokens) {
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(S, Writeback_Shared_Data) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
-
- transition(S, Data_Owner, O) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Writeback_Owned, O) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(S, Data_All_Tokens, M) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(S, Writeback_All_Tokens, M) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(S, L1_GETX, I) {
- a_broadcastLocalRequest;
- tt_sendLocalAckWithCollectedTokens;
- r_markNewSharer;
- r_setMRU;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
-
- transition(S, L1_GETS) {
- k_dataFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(S, L1_GETS_Last_Token, I) {
- k_dataFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- // Transitions from Owned
-
- transition(O, L2_Replacement, I) {
- cc_dirtyReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(O, Transient_GETX, I) {
- r_clearExclusive;
- dd_sendDataWithAllTokens;
- j_forwardTransientRequestToLocalSharers;
- m_popRequestQueue;
- }
-
- transition(O, Persistent_GETX, I_L) {
- ee_sendDataWithAllTokens;
- l_popPersistentQueue;
- }
-
- transition(O, Persistent_GETS, S_L) {
- ff_sendDataWithAllButOneTokens;
- l_popPersistentQueue;
- }
-
- transition(O, Persistent_GETS_Last_Token, I_L) {
- fa_sendDataWithAllTokens;
- l_popPersistentQueue;
- }
-
- transition(O, Transient_GETS) {
- // send multiple tokens
- r_clearExclusive;
- d_sendDataWithTokens;
- m_popRequestQueue;
- }
-
- transition(O, Transient_GETS_Last_Token) {
- // WAIT FOR IT TO GO PERSISTENT
- r_clearExclusive;
- m_popRequestQueue;
- }
-
- transition(O, Ack) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(O, Ack_All_Tokens, M) {
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(O, Data_Shared) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
-
- transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(O, Data_All_Tokens, M) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- n_popResponseQueue;
- }
-
- transition(O, Writeback_All_Tokens, M) {
- w_assertIncomingDataAndCacheDataMatch;
- q_updateTokensFromResponse;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(O, L1_GETS) {
- k_dataFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(O, L1_GETS_Last_Token, I) {
- k_dataOwnerFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(O, L1_GETX, I) {
- a_broadcastLocalRequest;
- k_dataAndAllTokensFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- // Transitions from M
-
- transition(M, L2_Replacement, I) {
- cc_dirtyReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- // MRM_DEBUG: Give up all tokens even for GETS? ???
- transition(M, {Transient_GETX, Transient_GETS}, I) {
- r_clearExclusive;
- dd_sendDataWithAllTokens;
- m_popRequestQueue;
- }
-
- transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
- ee_sendDataWithAllTokens;
- l_popPersistentQueue;
- }
-
-
- transition(M, L1_GETS, O) {
- k_dataFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(M, L1_GETX, I) {
- k_dataAndAllTokensFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
-
- // Transitions from locked states
-
- transition({I_L, S_L}, Ack) {
- gg_bounceResponseToStarver;
- n_popResponseQueue;
- }
-
- transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
- gg_bounceResponseToStarver;
- n_popResponseQueue;
- }
-
- transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
- gg_bounceWBSharedToStarver;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
- gg_bounceWBOwnedToStarver;
- h_updateFilterFromL1HintOrWB;
- n_popResponseQueue;
- }
-
- transition(S_L, L2_Replacement, I) {
- c_cleanReplacement;
- rr_deallocateL2CacheBlock;
- }
-
- transition(I_L, L2_Replacement, I) {
- rr_deallocateL2CacheBlock;
- }
-
- transition(I_L, Own_Lock_or_Unlock, I) {
- l_popPersistentQueue;
- }
-
- transition(S_L, Own_Lock_or_Unlock, S) {
- l_popPersistentQueue;
- }
-
- transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
- r_clearExclusive;
- m_popRequestQueue;
- }
-
- transition(I_L, {L1_GETX, L1_GETS}) {
- a_broadcastLocalRequest;
- r_markNewSharer;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(S_L, L1_GETX, I_L) {
- a_broadcastLocalRequest;
- tt_sendLocalAckWithCollectedTokens;
- r_markNewSharer;
- r_setMRU;
- uu_profileMiss;
- o_popL1RequestQueue;
- }
-
- transition(S_L, L1_GETS) {
- k_dataFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(S_L, L1_GETS_Last_Token, I_L) {
- k_dataFromL2CacheToL1Requestor;
- r_markNewSharer;
- r_setMRU;
- uu_profileHit;
- o_popL1RequestQueue;
- }
-
- transition(S_L, Persistent_GETX, I_L) {
- e_sendAckWithCollectedTokens;
- l_popPersistentQueue;
- }
-
- transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
- l_popPersistentQueue;
- }
-
- transition(I_L, {Persistent_GETX, Persistent_GETS}) {
- l_popPersistentQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-machine(MachineType:Directory, "Token protocol")
- : DirectoryMemory * directory;
- int l2_select_num_bits;
- Cycles directory_latency := 5;
- bool distributed_persistent := "True";
- Cycles fixed_timeout_latency := 100;
- Cycles reissue_wakeup_latency := 10;
- Cycles to_memory_controller_latency := 1;
-
- // Message Queues from dir to other controllers / network
- MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
- vnet_type="response";
-
- MessageBuffer * responseFromDir, network="To", virtual_network="4",
- vnet_type="response";
-
- MessageBuffer * persistentFromDir, network="To", virtual_network="3",
- vnet_type="persistent";
-
- MessageBuffer * requestFromDir, network="To", virtual_network="1",
- vnet_type="request";
-
- // Message Queues to dir from other controllers / network
- MessageBuffer * responseToDir, network="From", virtual_network="4",
- vnet_type="response";
-
- MessageBuffer * persistentToDir, network="From", virtual_network="3",
- vnet_type="persistent";
-
- MessageBuffer * requestToDir, network="From", virtual_network="2",
- vnet_type="request";
-
- MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
- vnet_type="request";
-
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_O") {
- // Base states
- O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
- NO, AccessPermission:Maybe_Stale, desc="Not Owner";
- L, AccessPermission:Busy, desc="Locked";
-
- // Memory wait states - can block all messages including persistent requests
- O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
- L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
- L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
- DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
- DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
- NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
- O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
- O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";
-
- // DMA request transient states - must respond to persistent requests
- O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
- NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
- NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";
-
- // DMA request in progress - competing with a CPU persistent request
- DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
- DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
-
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- GETX, desc="A GETX arrives";
- GETS, desc="A GETS arrives";
- Lockdown, desc="A lockdown request arrives";
- Unlockdown, desc="An un-lockdown request arrives";
- Own_Lock_or_Unlock, desc="own lock or unlock";
- Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
-    Data_Owner, desc="Data arrives";
- Data_All_Tokens, desc="Data and all tokens";
- Ack_Owner, desc="Owner token arrived without data because it was clean";
- Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
- Tokens, desc="Tokens arrive";
- Ack_All_Tokens, desc="All_Tokens arrive";
- Request_Timeout, desc="A DMA request has timed out";
-
- // Memory Controller
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
-
- // DMA requests
- DMA_READ, desc="A DMA Read memory request";
- DMA_WRITE, desc="A DMA Write memory request";
- DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
-
- // The following state is provided to allow for bandwidth
-    // efficient directory-like operation. However, all of this state
-    // is 'soft state' that does not need to be correct (as long as
-    // you're eventually willing to resort to broadcast).
-
- Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
- Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
- }
-
- structure(PersistentTable, external="yes") {
- void persistentRequestLock(Addr, MachineID, AccessType);
- void persistentRequestUnlock(Addr, MachineID);
- bool okToIssueStarving(Addr, MachineID);
- MachineID findSmallest(Addr);
- AccessType typeOfSmallest(Addr);
- void markEntries(Addr);
- bool isLocked(Addr);
- int countStarvingForAddress(Addr);
- int countReadStarvingForAddress(Addr);
- }
-
- // TBE entries for DMA requests
- structure(TBE, desc="TBE entries for outstanding DMA requests") {
- Addr PhysicalAddress, desc="physical address";
- State TBEState, desc="Transient State";
- DataBlock DataBlk, desc="Current view of the associated address range";
- int Len, desc="...";
- MachineID DmaRequestor, desc="DMA requestor";
- bool WentPersistent, desc="Did the DMA request require a persistent request";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
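
The TBETable holds per-address transient request state and is declared `external="yes"` because its implementation lives in C++ rather than SLICC. A minimal map-based sketch of the interface used above (names and types simplified; the real gem5 class also enforces the `m_number_of_TBEs` capacity):

```cpp
#include <cassert>
#include <cstdint>
#include <unordered_map>

// Simplified stand-in for the external TBETable: one transient buffer
// entry (TBE) per in-flight block address.
struct TBE {
    uint64_t physicalAddress = 0;
    int len = 0;
    bool wentPersistent = false;
};

class TBETable {
    std::unordered_map<uint64_t, TBE> table_;
  public:
    bool isPresent(uint64_t addr) const { return table_.count(addr) != 0; }
    void allocate(uint64_t addr) {
        assert(!isPresent(addr));        // one outstanding request per block
        table_.emplace(addr, TBE{});
    }
    void deallocate(uint64_t addr) { table_.erase(addr); }
    TBE *lookup(uint64_t addr) {
        auto it = table_.find(addr);
        return it == table_.end() ? nullptr : &it->second;
    }
};
```
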
-
- // ** OBJECTS **
-
- PersistentTable persistentTable;
- TimerTable reissueTimerTable;
-
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
- bool starving, default="false";
- int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
-
- Tick clockEdge();
- Tick clockEdge(Cycles c);
- Tick cyclesToTicks(Cycles c);
- void set_tbe(TBE b);
- void unset_tbe();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
-
- if (is_valid(dir_entry)) {
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
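
getDirectoryEntry follows a get-or-allocate pattern: entries are created lazily on first touch, and the Tokens field defaults to max_tokens() because the directory initially owns every token for a block. A hedged C++ sketch of the same pattern (kMaxTokens is an assumed constant):

```cpp
#include <cstdint>
#include <unordered_map>

constexpr int kMaxTokens = 32;  // assumed per-block token total

struct Entry {
    int tokens = kMaxTokens;    // directory starts with all tokens
};

class DirectoryMemory {
    std::unordered_map<uint64_t, Entry> entries_;
  public:
    // Lazily create the entry on first access, mirroring
    // directory.allocate(addr, new Entry) above.
    Entry &getDirectoryEntry(uint64_t addr) {
        return entries_[addr];  // operator[] default-constructs on miss
    }
};
```
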
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else {
- return getDirectoryEntry(addr).DirectoryState;
- }
- }
-
- void setState(TBE tbe, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- getDirectoryEntry(addr).DirectoryState := state;
-
- if (state == State:L || state == State:DW_L || state == State:DR_L) {
- assert(getDirectoryEntry(addr).Tokens == 0);
- }
-
- // We have one or zero owners
- assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));
-
- // Make sure the token count is in range
- assert(getDirectoryEntry(addr).Tokens >= 0);
- assert(getDirectoryEntry(addr).Tokens <= max_tokens());
-
- if (state == State:O || state == State:O_W || state == State:O_DW) {
- assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
- // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
- }
- }
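
setState doubles as an invariant checker: its assertions encode the core token-coherence rules. The sketch below restates them in plain C++ (kMaxTokens is an assumed constant; in SLICC it comes from max_tokens()):

```cpp
#include <cassert>

constexpr int kMaxTokens = 32;   // assumed per-block token total

struct DirState {
    int tokens;       // tokens currently held by the directory
    bool isOwner;     // O, O_W, O_DW and friends
    bool isLocked;    // L, DW_L, DR_L
    int ownerCount;   // size of the probable-owner set
};

void checkTokenInvariants(const DirState &s) {
    // Token count is always within [0, max_tokens].
    assert(s.tokens >= 0 && s.tokens <= kMaxTokens);
    // A locked block has handed every token to the starving requestor.
    if (s.isLocked)
        assert(s.tokens == 0);
    // At most one probable owner at any time.
    assert(s.ownerCount == 0 || s.ownerCount == 1);
    // Owner states require holding at least one token.
    if (s.isOwner)
        assert(s.tokens >= 1);
}
```
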
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- return Directory_State_to_permission(tbe.TBEState);
- }
-
- if (directory.isPresent(addr)) {
- DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
-
- bool okToIssueStarving(Addr addr, MachineID machinID) {
-    return persistentTable.okToIssueStarving(addr, machinID);
- }
-
- void markPersistentEntries(Addr addr) {
- persistentTable.markEntries(addr);
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- // ** OUT_PORTS **
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
- out_port(requestNetwork_out, RequestMsg, requestFromDir);
- out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
-
- // ** IN_PORTS **
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Reissue Timer
- in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
- Tick current_time := clockEdge();
- if (reissueTimerTable_in.isReady(current_time)) {
- Addr addr := reissueTimerTable.nextAddress();
- trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
- }
- }
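
The reissue timer is how a DMA request recovers when its broadcast never collects enough tokens: an address is armed with a deadline, and the in_port above fires Request_Timeout once the current tick passes it. A small sketch of that behavior, assuming a Tick is a plain integer:

```cpp
#include <cstdint>
#include <map>
#include <optional>

using Tick = uint64_t;

// Minimal reissue timer table: address -> wakeup deadline.
class TimerTable {
    std::map<uint64_t, Tick> deadlines_;
  public:
    void set(uint64_t addr, Tick when) { deadlines_[addr] = when; }
    void unset(uint64_t addr) { deadlines_.erase(addr); }
    bool isSet(uint64_t addr) const { return deadlines_.count(addr) != 0; }

    // Returns an expired address, if any, mirroring the
    // isReady(clockEdge()) + nextAddress() pair used above.
    std::optional<uint64_t> ready(Tick now) const {
        for (const auto &[addr, when] : deadlines_)
            if (when <= now)
                return addr;
        return std::nullopt;
    }
};
```
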
-
- in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady(clockEdge())) {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
- if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
- (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
- trigger(Event:Data_All_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
- trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack_All_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- } else {
- if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
- trigger(Event:Data_Owner, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
- (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
- trigger(Event:Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
- trigger(Event:Ack_Owner, in_msg.addr,
- TBEs[in_msg.addr]);
- } else {
- DPRINTF(RubySlicc, "%s\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
- }
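
The dispatch above hinges on one sum: if the tokens already at the directory plus those carried by the incoming response reach max_tokens, the *_All_Tokens event variants fire and a pending DMA write or writeback can complete. A condensed restatement (enum and function names are illustrative):

```cpp
enum class RespType { DataOwner, DataShared, AckOwner, Ack };
enum class DirEvent {
    DataAllTokens, AckOwnerAllTokens, AckAllTokens,
    DataOwner, AckOwner, Tokens
};

// Same decision tree as the responseNetwork_in port above.
DirEvent classifyResponse(RespType t, int dirTokens, int msgTokens,
                          int maxTokens) {
    if (dirTokens + msgTokens == maxTokens) {
        if (t == RespType::DataOwner || t == RespType::DataShared)
            return DirEvent::DataAllTokens;
        if (t == RespType::AckOwner)
            return DirEvent::AckOwnerAllTokens;
        return DirEvent::AckAllTokens;          // plain ACK
    }
    if (t == RespType::DataOwner)
        return DirEvent::DataOwner;
    if (t == RespType::AckOwner)
        return DirEvent::AckOwner;
    return DirEvent::Tokens;                    // ACK or DATA_SHARED
}
```
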
-
- in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
- if (persistentNetwork_in.isReady(clockEdge())) {
- peek(persistentNetwork_in, PersistentMsg) {
- assert(in_msg.Destination.isElement(machineID));
-
- if (distributed_persistent) {
- // Apply the lockdown or unlockdown message to the table
- if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
- } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
- persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
- } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
- persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
- } else {
- error("Invalid message");
- }
-
- // React to the message based on the current state of the table
- if (persistentTable.isLocked(in_msg.addr)) {
- if (persistentTable.findSmallest(in_msg.addr) == machineID) {
- if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
- trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
- } else {
- trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs[in_msg.addr]);
- }
- } else {
- // locked
- trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
- }
- } else {
- // unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
- }
- }
- else {
- if (persistentTable.findSmallest(in_msg.addr) == machineID) {
- if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
- trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
- } else {
- trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs[in_msg.addr]);
- }
- } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
- // locked
- trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
- // locked
- trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
- // unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- error("Invalid message");
- }
- }
- }
- }
- }
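
Persistent requests are the protocol's starvation-avoidance mechanism: every controller applies the same lockdown/unlockdown stream to its local table, so all of them independently agree that the requestor with the smallest MachineID owns the block next. A hedged sketch of the table logic consulted above:

```cpp
#include <cstdint>
#include <map>
#include <optional>

using MachineID = int;

enum class AccessType { Read, Write };

// Every node keeps an identical copy of this table, so arbitration by
// smallest MachineID needs no extra messages.
class PersistentTable {
    std::map<uint64_t, std::map<MachineID, AccessType>> table_;
  public:
    void lock(uint64_t addr, MachineID m, AccessType a) { table_[addr][m] = a; }
    void unlock(uint64_t addr, MachineID m) {
        auto it = table_.find(addr);
        if (it == table_.end()) return;
        it->second.erase(m);
        if (it->second.empty()) table_.erase(it);
    }
    bool isLocked(uint64_t addr) const { return table_.count(addr) != 0; }
    // Lowest MachineID wins; std::map iterates keys in sorted order.
    std::optional<MachineID> findSmallest(uint64_t addr) const {
        auto it = table_.find(addr);
        if (it == table_.end() || it->second.empty()) return std::nullopt;
        return it->second.begin()->first;
    }
};
```
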
-
- in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady(clockEdge())) {
- peek(requestNetwork_in, RequestMsg) {
- assert(in_msg.Destination.isElement(machineID));
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == DMARequestType:WRITE) {
- if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
- trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
- } else {
- trigger(Event:DMA_WRITE, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
- }
- } else {
- error("Invalid message");
- }
- }
- }
- }
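
The DMA write fast path is worth calling out: when the directory already holds every token, no cache can have a valid copy, so the write is queued straight to memory (DMA_WRITE_All_Tokens) instead of broadcasting a GETX. A one-function restatement of that check:

```cpp
enum class DmaEvent { DmaRead, DmaWrite, DmaWriteAllTokens };

// Mirrors the dmaRequestQueue_in dispatch above: a write only needs a
// token-collecting broadcast if some tokens are out in the caches.
DmaEvent classifyDmaRequest(bool isRead, int dirTokens, int maxTokens) {
    if (isRead)
        return DmaEvent::DmaRead;
    return (dirTokens == maxTokens) ? DmaEvent::DmaWriteAllTokens
                                    : DmaEvent::DmaWrite;
}
```
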
-
- // Actions
-
- action(a_sendTokens, "a", desc="Send tokens to requestor") {
- // Only send a message if we have tokens to send
- if (getDirectoryEntry(address).Tokens > 0) {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- getDirectoryEntry(address).Tokens := 0;
- }
- }
-
- action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
- if (okToIssueStarving(address, machineID) && (starving == false)) {
- enqueue(persistentNetwork_out, PersistentMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
-
- //
- // Currently the configuration system limits the system to only one
- // chip. Therefore, if we assume one shared L2 cache, then only one
-        // pertinent L2 cache exists.
- //
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := PrefetchBit:No;
- out_msg.AccessMode := RubyAccessMode:Supervisor;
- }
- markPersistentEntries(address);
- starving := true;
-
- tbe.WentPersistent := true;
-
-      // Do not schedule a wakeup; a persistent request will always complete
- } else {
-
- // We'd like to issue a persistent request, but are not allowed
-      // to issue a P.R. right now. Thus, we do not increment the
- // IssueCount.
-
- // Set a wakeup timer
- reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
- }
- }
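
The retry policy embodied by px (and ps below) is: escalate to a persistent request at most once per starving episode, and otherwise arm a short wakeup timer so the ordinary broadcast can be retried. A compact sketch, with the messaging and table calls stubbed out as callbacks:

```cpp
#include <cstdint>
#include <functional>

// Illustrative controller-side retry policy; the callbacks stand in
// for the SLICC enqueue/markPersistentEntries/timer actions above.
struct PersistentRetryPolicy {
    bool starving = false;

    std::function<bool(uint64_t)> okToIssueStarving;  // table rate limit
    std::function<void(uint64_t)> issuePersistent;    // broadcast *_PERSISTENT
    std::function<void(uint64_t, int)> scheduleWakeup;

    void onTimeout(uint64_t addr, int reissueWakeupLatency) {
        if (okToIssueStarving(addr) && !starving) {
            issuePersistent(addr);
            starving = true;   // no timer: a persistent request always completes
        } else {
            // Not allowed to escalate yet; retry after a short delay.
            scheduleWakeup(addr, reissueWakeupLatency);
        }
    }
};
```
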
-
- action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- //
-      // Assert that we only send a message if we don't already have all the tokens
- //
- assert(getDirectoryEntry(address).Tokens != max_tokens());
- enqueue(requestNetwork_out, RequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
-
- //
- // Since only one chip, assuming all L1 caches are local
- //
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.RetryNum := 0;
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.Prefetch := PrefetchBit:No;
- out_msg.AccessMode := RubyAccessMode:Supervisor;
- }
- }
- }
-
- action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
- if (okToIssueStarving(address, machineID) && (starving == false)) {
- enqueue(persistentNetwork_out, PersistentMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
-
- //
- // Currently the configuration system limits the system to only one
- // chip. Therefore, if we assume one shared L2 cache, then only one
-        // pertinent L2 cache exists.
- //
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Persistent_Control;
- out_msg.Prefetch := PrefetchBit:No;
- out_msg.AccessMode := RubyAccessMode:Supervisor;
- }
- markPersistentEntries(address);
- starving := true;
-
- tbe.WentPersistent := true;
-
-      // Do not schedule a wakeup; a persistent request will always complete
- } else {
-
- // We'd like to issue a persistent request, but are not allowed
-      // to issue a P.R. right now. Thus, we do not increment the
- // IssueCount.
-
- // Set a wakeup timer
- reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
- }
- }
-
- action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(requestNetwork_out, RequestMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
-
- //
- // Since only one chip, assuming all L1 caches are local
- //
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.RetryNum := 0;
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.Prefetch := PrefetchBit:No;
- out_msg.AccessMode := RubyAccessMode:Supervisor;
- }
- }
- }
-
- action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
- // Only send a message if we have tokens to send
- if (getDirectoryEntry(address).Tokens > 0) {
- enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := getDirectoryEntry(address).Tokens;
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- getDirectoryEntry(address).Tokens := 0;
- }
- }
-
- action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- assert(getDirectoryEntry(address).Tokens > 0);
- out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- getDirectoryEntry(address).Tokens := 0;
- }
-
- action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getDirectoryEntry(address).Tokens > 0);
- out_msg.Tokens := getDirectoryEntry(address).Tokens;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- getDirectoryEntry(address).Tokens := 0;
- }
-
- action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- assert(getDirectoryEntry(address).Tokens > 0);
- out_msg.Tokens := getDirectoryEntry(address).Tokens;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := false;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- getDirectoryEntry(address).Tokens := 0;
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestNetwork_in, RequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
- action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
- queueMemoryRead(persistentTable.findSmallest(address), address,
- to_memory_controller_latency);
- }
-
-  action(fd_memoryDma, "fd", desc="Queue off-chip DMA fetch request") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
- action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
- queueMemoryWritePartial(tbe.DmaRequestor, address,
- to_memory_controller_latency, tbe.DataBlk,
- tbe.Len);
- }
-
- action(lr_queueMemoryDmaReadWriteback, "lr",
- desc="Write DMA data from read to memory") {
- peek(responseNetwork_in, ResponseMsg) {
- queueMemoryWrite(machineID, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DataBlk := in_msg.DataBlk;
- tbe.PhysicalAddress := in_msg.PhysicalAddress;
- tbe.Len := in_msg.Len;
- tbe.DmaRequestor := in_msg.Requestor;
- tbe.WentPersistent := false;
- }
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
-
- if (tbe.WentPersistent) {
- assert(starving);
-
- enqueue(persistentNetwork_out, PersistentMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
-
- //
- // Currently the configuration system limits the system to only one
- // chip. Therefore, if we assume one shared L2 cache, then only one
-        // pertinent L2 cache exists.
- //
- //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
-
- out_msg.Destination.add(mapAddressToRange(address,
- MachineType:L2Cache, l2_select_low_bit,
- l2_select_num_bits, intToID(0)));
-
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Persistent_Control;
- }
- starving := false;
- }
-
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
- peek(responseNetwork_in, ResponseMsg) {
- DataBlock DataBlk := tbe.DataBlk;
- tbe.DataBlk := in_msg.DataBlk;
- tbe.DataBlk.copyPartial(DataBlk, getOffset(tbe.PhysicalAddress),
- tbe.Len);
- }
- }
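
rd_recordDataInTbe merges a sub-line DMA write into the full cache line just received: the incoming block becomes the base, then the DMA bytes saved in the TBE are copied back over it at the request's offset. A byte-array sketch of that copyPartial step (block size is an assumption):

```cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

constexpr int kBlockSize = 64;            // assumed cache line size
using DataBlock = std::array<uint8_t, kBlockSize>;

// Overlay `len` DMA bytes onto the freshly received line at `offset`,
// mirroring tbe.DataBlk.copyPartial(...) above.
DataBlock mergeDmaWrite(const DataBlock &lineFromOwner,
                        const DataBlock &dmaBytes, int offset, int len) {
    assert(offset >= 0 && len >= 0 && offset + len <= kBlockSize);
    DataBlock merged = lineFromOwner;     // start from the owner's data
    std::memcpy(merged.data() + offset, dmaBytes.data() + offset, len);
    return merged;
}
```
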
-
- action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Tokens >= 1);
- getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
- }
- }
-
- action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
- assert(getDirectoryEntry(address).Tokens == max_tokens());
- }
-
- action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue(clockEdge());
- }
-
- action(z_recycleRequest, "z", desc="Recycle the request queue") {
- requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue(clockEdge());
- }
-
- action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
- responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
- persistentNetwork_in.dequeue(clockEdge());
- }
-
- action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
- action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
- }
-
- action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := in_msg.Dirty;
- }
- }
- }
-
- action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
- //
-    // currently we only support a fixed timeout latency
- //
- if (reissueTimerTable.isSet(address)) {
- reissueTimerTable.unset(address);
- reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
- }
- }
-
- action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
- //
-    // currently we only support a fixed timeout latency
- //
- reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
- }
-
- action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
- if (reissueTimerTable.isSet(address)) {
- reissueTimerTable.unset(address);
- }
- }
-
- action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
- peek(responseNetwork_in, ResponseMsg) {
- assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
- assert(in_msg.Dirty == false);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
-
- // Bounce the message, but "re-associate" the data and the owner
- // token. In essence we're converting an ACK_OWNER message to a
- // DATA_OWNER message, keeping the number of tokens the same.
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_OWNER;
- out_msg.Sender := machineID;
- out_msg.Destination.add(persistentTable.findSmallest(address));
- out_msg.Tokens := in_msg.Tokens;
- out_msg.Dirty := in_msg.Dirty;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:ACK;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:DATA;
- //
- // we send the entire data block and rely on the dma controller to
- // split it up if need be
- //
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
- peek(responseNetwork_in, ResponseMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:DATA;
- //
- // we send the entire data block and rely on the dma controller to
- // split it up if need be
- //
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- // TRANSITIONS
-
- //
- // Trans. from base state O
- // the directory has valid data
- //
- transition(O, GETX, NO_W) {
- qf_queueMemoryFetchRequest;
- j_popIncomingRequestQueue;
- }
-
- transition(O, DMA_WRITE, O_DW) {
- vd_allocateDmaRequestInTBE;
- bw_broadcastWrite;
- st_scheduleTimeout;
- p_popDmaRequestQueue;
- }
-
- transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
- vd_allocateDmaRequestInTBE;
- ld_queueMemoryDmaWriteFromTbe;
- p_popDmaRequestQueue;
- }
-
- transition(O, GETS, NO_W) {
- qf_queueMemoryFetchRequest;
- j_popIncomingRequestQueue;
- }
-
- transition(O, DMA_READ, O_DR_W) {
- vd_allocateDmaRequestInTBE;
- fd_memoryDma;
- st_scheduleTimeout;
- p_popDmaRequestQueue;
- }
-
- transition(O, Lockdown, L_O_W) {
- qp_queueMemoryForPersistent;
- l_popIncomingPersistentQueue;
- }
-
- transition(O, {Tokens, Ack_All_Tokens}) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- transition(O, {Data_Owner, Data_All_Tokens}) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- transition({O, NO}, Unlockdown) {
- l_popIncomingPersistentQueue;
- }
-
- //
- // transitioning to Owner, waiting for memory before DMA ack
- // All other events should recycle/stall
- //
- transition(O_DR_W, Memory_Data, O) {
- dm_sendMemoryDataToDma;
- ut_unsetReissueTimer;
- s_deallocateTBE;
- l_popMemQueue;
- }
-
- //
- // issued GETX for DMA write, waiting for all tokens
- //
- transition(O_DW, Request_Timeout) {
- ut_unsetReissueTimer;
- px_tryIssuingPersistentGETXRequest;
- }
-
- transition(O_DW, Tokens) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- transition(O_DW, Data_Owner) {
- f_incrementTokens;
- rd_recordDataInTbe;
- k_popIncomingResponseQueue;
- }
-
- transition(O_DW, Ack_Owner) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- transition(O_DW, Lockdown, DW_L) {
- de_sendTbeDataToStarver;
- l_popIncomingPersistentQueue;
- }
-
- transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
- f_incrementTokens;
- rd_recordDataInTbe;
- ld_queueMemoryDmaWriteFromTbe;
- ut_unsetReissueTimer;
- k_popIncomingResponseQueue;
- }
-
- transition(O_DW, Ack_All_Tokens, O_DW_W) {
- f_incrementTokens;
- ld_queueMemoryDmaWriteFromTbe;
- ut_unsetReissueTimer;
- k_popIncomingResponseQueue;
- }
-
- transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
- f_incrementTokens;
- ld_queueMemoryDmaWriteFromTbe;
- ut_unsetReissueTimer;
- k_popIncomingResponseQueue;
- }
-
- transition(O_DW_W, Memory_Ack, O) {
- da_sendDmaAck;
- s_deallocateTBE;
- l_popMemQueue;
- }
-
- //
- // Trans. from NO
-  // The directory does not have valid data, but may have some tokens
- //
- transition(NO, GETX) {
- a_sendTokens;
- j_popIncomingRequestQueue;
- }
-
- transition(NO, DMA_WRITE, NO_DW) {
- vd_allocateDmaRequestInTBE;
- bw_broadcastWrite;
- st_scheduleTimeout;
- p_popDmaRequestQueue;
- }
-
- transition(NO, GETS) {
- j_popIncomingRequestQueue;
- }
-
- transition(NO, DMA_READ, NO_DR) {
- vd_allocateDmaRequestInTBE;
- br_broadcastRead;
- st_scheduleTimeout;
- p_popDmaRequestQueue;
- }
-
- transition(NO, Lockdown, L) {
- aa_sendTokensToStarver;
- l_popIncomingPersistentQueue;
- }
-
- transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
- f_incrementTokens;
- lq_queueMemoryWbRequest;
- k_popIncomingResponseQueue;
- }
-
- transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- transition(NO, Tokens) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- transition(NO_W, Memory_Data, NO) {
- d_sendMemoryDataWithAllTokens;
- l_popMemQueue;
- }
-
- // Trans. from NO_DW
- transition(NO_DW, Request_Timeout) {
- ut_unsetReissueTimer;
- px_tryIssuingPersistentGETXRequest;
- }
-
- transition(NO_DW, Lockdown, DW_L) {
- aa_sendTokensToStarver;
- l_popIncomingPersistentQueue;
- }
-
- // Note: NO_DW, Data_All_Tokens transition is combined with O_DW
-  // Note: NO_DW should not receive the event Ack_All_Tokens because the
- // directory does not have valid data
-
- transition(NO_DW, Data_Owner, O_DW) {
- f_incrementTokens;
- rd_recordDataInTbe;
- k_popIncomingResponseQueue;
- }
-
- transition({NO_DW, NO_DR}, Tokens) {
- f_incrementTokens;
- k_popIncomingResponseQueue;
- }
-
- // Trans. from NO_DR
- transition(NO_DR, Request_Timeout) {
- ut_unsetReissueTimer;
- ps_tryIssuingPersistentGETSRequest;
- }
-
- transition(NO_DR, Lockdown, DR_L) {
- aa_sendTokensToStarver;
- l_popIncomingPersistentQueue;
- }
-
- transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
- f_incrementTokens;
- dd_sendDmaData;
- lr_queueMemoryDmaReadWriteback;
- ut_unsetReissueTimer;
- s_deallocateTBE;
- k_popIncomingResponseQueue;
- }
-
- // Trans. from L
- transition({L, DW_L, DR_L}, {GETX, GETS}) {
- j_popIncomingRequestQueue;
- }
-
- transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
- l_popIncomingPersistentQueue;
- }
-
- //
- // Received data for lockdown blocks
- // For blocks with outstanding dma requests to them
- // ...we could change this to write the data to memory and send it cleanly
- // ...we could also proactively complete our DMA requests
- // However, to keep my mind from spinning out-of-control, we won't for now :)
- //
- transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
- r_bounceResponse;
- k_popIncomingResponseQueue;
- }
-
- transition({DW_L, DR_L, L}, Tokens) {
- r_bounceResponse;
- k_popIncomingResponseQueue;
- }
-
- transition({DW_L, DR_L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
- bd_bounceDatalessOwnerToken;
- k_popIncomingResponseQueue;
- }
-
- transition(L, {Ack_Owner_All_Tokens, Ack_Owner}, L_O_W) {
- f_incrementTokens;
- qp_queueMemoryForPersistent;
- k_popIncomingResponseQueue;
- }
-
- transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
- l_popIncomingPersistentQueue;
- }
-
- transition(L, Own_Lock_or_Unlock_Tokens, O) {
- l_popIncomingPersistentQueue;
- }
-
- transition({L_NO_W, L_O_W}, Memory_Data, L) {
- dd_sendMemDataToStarver;
- l_popMemQueue;
- }
-
- transition(L_O_W, Memory_Ack) {
- qp_queueMemoryForPersistent;
- l_popMemQueue;
- }
-
- transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition(DR_L_W, Memory_Data, DR_L) {
- dd_sendMemDataToStarver;
- l_popMemQueue;
- }
-
- transition(DW_L_W, Memory_Ack, L) {
- aat_assertAllTokens;
- da_sendDmaAck;
- s_deallocateTBE;
- dd_sendMemDataToStarver;
- l_popMemQueue;
- }
-
- transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
- l_popIncomingPersistentQueue;
- }
-
- transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
- ut_unsetReissueTimer;
- px_tryIssuingPersistentGETXRequest;
- }
-
- transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
- l_popIncomingPersistentQueue;
- }
-
- transition(DR_L, Request_Timeout) {
- ut_unsetReissueTimer;
- ps_tryIssuingPersistentGETSRequest;
- }
-
- //
-  // The O_W + Memory_Data -> O transition is confusing, but it can happen if a
-  // persistent request is issued and resolves before memory returns with data
- //
- transition(O_W, {Memory_Ack, Memory_Data}, O) {
- l_popMemQueue;
- }
-
- transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
- l_popIncomingPersistentQueue;
- }
-
- // Blocked states
- transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
- z_recycleRequest;
- }
-
- transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
- y_recycleDmaRequestQueue;
- }
-
- transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
- kz_recycleResponse;
- }
-
- //
- // If we receive a request timeout while waiting for memory, it is likely that
-  // the request will be satisfied and issuing a persistent request will do us
- // no good. Just wait.
- //
- transition({O_DW_W, O_DR_W}, Request_Timeout) {
- rs_resetScheduleTimeout;
- }
-
- transition(NO_W, Lockdown, L_NO_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition(O_W, Lockdown, L_O_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition(O_DR_W, Lockdown, DR_L_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition(O_DW_W, Lockdown, DW_L_W) {
- l_popIncomingPersistentQueue;
- }
-
- transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
- l_popIncomingPersistentQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-machine(MachineType:DMA, "DMA Controller")
- : DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-
-  // Message Queues
- MessageBuffer * responseFromDir, network="From", virtual_network="5",
- vnet_type="response";
- MessageBuffer * reqToDirectory, network="To", virtual_network="0",
- vnet_type="request";
-
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="DMA states", default="DMA_State_READY") {
- READY, AccessPermission:Invalid, desc="Ready to accept a new request";
-    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a read request";
-    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a write request";
- }
-
- enumeration(Event, desc="DMA events") {
- ReadRequest, desc="A new read request";
- WriteRequest, desc="A new write request";
- Data, desc="Data from a DMA memory read";
- Ack, desc="DMA write to memory completed";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Data";
- }
-
-  structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
-
- TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else {
- return State:READY;
- }
- }
-
- void setState(TBE tbe, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("DMA does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("DMA does not support functional write.");
- }
-
- out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
-
- in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, SequencerMsg) {
- if (in_msg.Type == SequencerRequestType:LD ) {
- trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == SequencerRequestType:ST) {
- trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid request type");
- }
- }
- }
- }
-
- in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady(clockEdge())) {
-      peek(dmaResponseQueue_in, DMAResponseMsg) {
- if (in_msg.Type == DMAResponseType:ACK) {
- trigger(Event:Ack, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == DMAResponseType:DATA) {
- trigger(Event:Data, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid response type");
- }
- }
- }
- }
-
- action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.LineAddress := in_msg.LineAddress;
- out_msg.Type := DMARequestType:READ;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.LineAddress := in_msg.LineAddress;
- out_msg.Type := DMARequestType:WRITE;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
- dma_sequencer.ackCallback(address);
- }
-
- action(d_dataCallback, "d", desc="Write data to dma sequencer") {
- dma_sequencer.dataCallback(tbe.DataBlk, address);
- }
-
- action(t_updateTBEData, "t", desc="Update TBE Data") {
- assert(is_valid(tbe));
- peek(dmaResponseQueue_in, DMAResponseMsg) {
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE entry") {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
-  action(p_popResponseQueue, "\p", desc="Pop response queue") {
- dmaResponseQueue_in.dequeue(clockEdge());
- }
-
- action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
- stall_and_wait(dmaRequestQueue_in, address);
- }
-
- action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
- transition(READY, ReadRequest, BUSY_RD) {
- v_allocateTBE;
- s_sendReadRequest;
- p_popRequestQueue;
- }
-
- transition(READY, WriteRequest, BUSY_WR) {
- v_allocateTBE;
- s_sendWriteRequest;
- p_popRequestQueue;
- }
-
- transition(BUSY_RD, Data, READY) {
- t_updateTBEData;
- d_dataCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition(BUSY_WR, Ack, READY) {
- a_ackCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
- zz_stallAndWaitRequestQueue;
- }
-}
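
The DMA controller reduces to a three-state machine: READY accepts one request, BUSY_RD/BUSY_WR wait for the directory's DATA/ACK, and any request arriving while busy is stalled and replayed on completion. A self-contained C++ sketch of that control flow:

```cpp
#include <deque>

enum class DmaState { Ready, BusyRd, BusyWr };
enum class DmaEvent { ReadRequest, WriteRequest, Data, Ack };

class DmaController {
    DmaState state_ = DmaState::Ready;
    std::deque<DmaEvent> stalled_;   // models stall_and_wait
  public:
    void handle(DmaEvent e) {
        switch (state_) {
        case DmaState::Ready:
            if (e == DmaEvent::ReadRequest)       state_ = DmaState::BusyRd;
            else if (e == DmaEvent::WriteRequest) state_ = DmaState::BusyWr;
            break;
        case DmaState::BusyRd:
        case DmaState::BusyWr:
            if ((state_ == DmaState::BusyRd && e == DmaEvent::Data) ||
                (state_ == DmaState::BusyWr && e == DmaEvent::Ack)) {
                state_ = DmaState::Ready;
                // wakeUpAllBuffers(): replay a stalled request, if any.
                if (!stalled_.empty()) {
                    DmaEvent next = stalled_.front();
                    stalled_.pop_front();
                    handle(next);
                }
            } else {
                stalled_.push_back(e);   // zz_stallAndWaitRequestQueue
            }
            break;
        }
    }
    DmaState state() const { return state_; }
};
```
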
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- GETS, desc="Get Shared";
-}
-
-// PersistentType
-enumeration(PersistentRequestType, desc="...") {
- GETX_PERSISTENT, desc="...";
- GETS_PERSISTENT, desc="...";
-  DEACTIVATE_PERSISTENT, desc="...";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- DATA_OWNER, desc="Data";
- ACK_OWNER, desc="data-less owner token";
- DATA_SHARED, desc="Data";
- ACK, desc="ACKnowledgment";
- WB_TOKENS, desc="L1 to L2 writeback";
- WB_SHARED_DATA, desc="L1 to L2 writeback with data";
- WB_OWNED, desc="L1 to L2 writeback with data";
- INV, desc="L1 informing L2 of loss of all tokens";
-}
-
-// PersistentMsg
-structure(PersistentMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- PersistentRequestType Type, desc="Type of starvation request";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Destination set";
- MessageSizeType MessageSize, desc="size category of the message";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-
- bool functionalRead(Packet *pkt) {
- // No data in persistent messages
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No data in persistent messages
- return false;
- }
-}
-
-// RequestMsg
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Multicast destination mask";
- bool isLocal, desc="Is this request from a local L1";
- int RetryNum, desc="retry sequence number";
- MessageSizeType MessageSize, desc="size category of the message";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-
- bool functionalRead(Packet *pkt) {
- // No data in request messages
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // No data in request messages
- return false;
- }
-}
-
-// ResponseMsg
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="Node who sent the data";
- NetDest Destination, desc="Node to whom the data is sent";
-  int Tokens, desc="Number of tokens being transferred for this line";
- DataBlock DataBlk, desc="data for the cache line";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
-    // No check is carried out on the message type; one could be added later.
- return testAndRead(addr, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- // No check required since all messages are written.
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
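
functionalRead/functionalWrite let gem5 service debugger-style accesses against in-flight messages. The testAndRead/testAndWrite helpers only touch the message when the packet's byte range lies within this block; a sketch of that containment test, under the assumption of a 64-byte line (the real helpers live in Ruby's C++ support code):

```cpp
#include <cstdint>
#include <cstring>

constexpr uint64_t kBlockSize = 64;   // assumed line size

// Functional write: if [pktAddr, pktAddr+pktSize) is contained in this
// block, patch the corresponding bytes and report success.
bool testAndWrite(uint64_t blockAddr, uint8_t *blockData,
                  uint64_t pktAddr, const uint8_t *pktData,
                  uint64_t pktSize) {
    if (pktAddr < blockAddr || pktAddr + pktSize > blockAddr + kBlockSize)
        return false;                 // packet does not target this block
    std::memcpy(blockData + (pktAddr - blockAddr), pktData, pktSize);
    return true;
}
```
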
-
-enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
- READ, desc="Memory Read";
- WRITE, desc="Memory Write";
- NULL, desc="Invalid";
-}
-
-enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
- DATA, desc="DATA read";
- ACK, desc="ACK write";
- NULL, desc="Invalid";
-}
-
-structure(DMARequestMsg, desc="...", interface="Message") {
- DMARequestType Type, desc="Request type (read/write)";
- Addr PhysicalAddress, desc="Physical address for this request";
- Addr LineAddress, desc="Line address for this request";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Destination";
- DataBlock DataBlk, desc="DataBlk attached to this request";
- int Len, desc="The length of the request";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(LineAddress, DataBlk, pkt);
- }
-}
-
-structure(DMAResponseMsg, desc="...", interface="Message") {
- DMAResponseType Type, desc="Response type (DATA/ACK)";
- Addr PhysicalAddress, desc="Physical address for this request";
- Addr LineAddress, desc="Line address for this request";
- NetDest Destination, desc="Destination";
- DataBlock DataBlk, desc="DataBlk attached to this request";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(LineAddress, DataBlk, pkt);
- }
-}
+++ /dev/null
-protocol "MOESI_CMP_token";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_CMP_token-msg.sm";
-include "MOESI_CMP_token-L1cache.sm";
-include "MOESI_CMP_token-L2cache.sm";
-include "MOESI_CMP_token-dir.sm";
-include "MOESI_CMP_token-dma.sm";
+++ /dev/null
-/*
- * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * AMD's contributions to the MOESI hammer protocol do not constitute an
- * endorsement of its similarity to any AMD products.
- *
- * Authors: Milo Martin
- * Brad Beckmann
- */
-
-machine(MachineType:L1Cache, "AMD Hammer-like protocol")
- : Sequencer * sequencer;
- CacheMemory * L1Icache;
- CacheMemory * L1Dcache;
- CacheMemory * L2cache;
- Cycles cache_response_latency := 10;
- Cycles issue_latency := 2;
- Cycles l2_cache_hit_latency := 10;
- bool no_mig_atomic := "True";
- bool send_evictions;
-
- // NETWORK BUFFERS
- MessageBuffer * requestFromCache, network="To", virtual_network="2",
- vnet_type="request";
- MessageBuffer * responseFromCache, network="To", virtual_network="4",
- vnet_type="response";
- MessageBuffer * unblockFromCache, network="To", virtual_network="5",
- vnet_type="unblock";
-
- MessageBuffer * forwardToCache, network="From", virtual_network="3",
- vnet_type="forward";
- MessageBuffer * responseToCache, network="From", virtual_network="4",
- vnet_type="response";
-
- MessageBuffer * mandatoryQueue;
-
- MessageBuffer * triggerQueue;
-{
- // STATES
- state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
- // Base states
- I, AccessPermission:Invalid, desc="Idle";
- S, AccessPermission:Read_Only, desc="Shared";
- O, AccessPermission:Read_Only, desc="Owned";
- M, AccessPermission:Read_Only, desc="Modified (dirty)";
- MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
-
- // Base states, locked and ready to service the mandatory queue
- IR, AccessPermission:Invalid, desc="Idle";
- SR, AccessPermission:Read_Only, desc="Shared";
- OR, AccessPermission:Read_Only, desc="Owned";
- MR, AccessPermission:Read_Only, desc="Modified (dirty)";
- MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
-
- // Transient States
- IM, AccessPermission:Busy, "IM", desc="Issued GetX";
- SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
- OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
- ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
- M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
- MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
- IS, AccessPermission:Busy, "IS", desc="Issued GetS";
- SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
- OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
- MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
- II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
- ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
- OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
- MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
-    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
-
- //Transition States Related to Flushing
- MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
- MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
- IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
- ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
- SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
- OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
- MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
- }
-
- // EVENTS
- enumeration(Event, desc="Cache events") {
- Load, desc="Load request from the processor";
- Ifetch, desc="I-fetch request from the processor";
- Store, desc="Store request from the processor";
- L2_Replacement, desc="L2 Replacement";
- L1_to_L2, desc="L1 to L2 transfer";
- Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
- Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
- Complete_L2_to_L1, desc="L2 to L1 transfer completed";
-
- // Requests
- Other_GETX, desc="A GetX from another processor";
- Other_GETS, desc="A GetS from another processor";
- Merged_GETS, desc="A Merged GetS from another processor";
-    Other_GETS_No_Mig, desc="A GetS from another processor, block cannot migrate";
- NC_DMA_GETS, desc="special GetS when only DMA exists";
- Invalidate, desc="Invalidate block";
-
- // Responses
- Ack, desc="Received an ack message";
- Shared_Ack, desc="Received an ack message, responder has a shared copy";
- Data, desc="Received a data message";
- Shared_Data, desc="Received a data message, responder has a shared copy";
- Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
-
- Writeback_Ack, desc="Writeback O.K. from directory";
- Writeback_Nack, desc="Writeback not O.K. from directory";
-
- // Triggers
- All_acks, desc="Received all required data and message acks";
- All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
-
- // For Flush
- Flush_line, desc="flush the cache line from all caches";
- Block_Ack, desc="the directory is blocked and ready for the flush";
- }
-
- // STRUCTURE DEFINITIONS
- // CacheEntry
- structure(Entry, desc="Cache entry for a single line", interface="AbstractCacheEntry") {
- State CacheState, desc="cache state";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- DataBlock DataBlk, desc="data for the block";
- bool FromL2, default="false", desc="block just moved from L2";
- bool AtomicAccessed, default="false", desc="block was accessed by an atomic request";
- }
-
- // TBE fields
- structure(TBE, desc="Transaction buffer entry for an outstanding request") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
- bool Sharers, desc="On a GetS, did we find any other sharers in the system";
- bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
- MachineID LastResponder, desc="last machine to send a response for this request";
- MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
-
- Cycles InitialRequestTime, default="Cycles(0)",
- desc="time the initial requests was sent from the L1Cache";
- Cycles ForwardRequestTime, default="Cycles(0)",
- desc="time the dir forwarded the request";
- Cycles FirstResponseTime, default="Cycles(0)",
- desc="the time the first response was received";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
- if(is_valid(L2cache_entry)) {
- return L2cache_entry;
- }
-
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
- if(is_valid(L1Dcache_entry)) {
- return L1Dcache_entry;
- }
-
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
- return L1Icache_entry;
- }
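-
- // The three lookups above are mutually exclusive: this protocol keeps
- // the L1D, L1I, and private L2 exclusive of one another (setState
- // asserts that at most one of them holds a tag for any address), so
- // probing the L2 first is merely an ordering choice, not a priority
- // scheme.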
-
- void functionalRead(Addr addr, Packet *pkt) {
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- testAndRead(addr, cache_entry.DataBlk, pkt);
- } else {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- error("Missing data block");
- }
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, cache_entry.DataBlk, pkt);
- return num_functional_writes;
- }
-
- TBE tbe := TBEs[addr];
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- return num_functional_writes;
- }
-
- Entry getL2CacheEntry(Addr address), return_by_pointer="yes" {
- Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
- return L2cache_entry;
- }
-
- Entry getL1DCacheEntry(Addr address), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
- return L1Dcache_entry;
- }
-
- Entry getL1ICacheEntry(Addr address), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
- return L1Icache_entry;
- }
-
- State getState(TBE tbe, Entry cache_entry, Addr addr) {
- if(is_valid(tbe)) {
- return tbe.TBEState;
- } else if (is_valid(cache_entry)) {
- return cache_entry.CacheState;
- }
- return State:I;
- }
-
- void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
- assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
- assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
- assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
-
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
-
- if (is_valid(cache_entry)) {
- cache_entry.CacheState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- return L1Cache_State_to_permission(tbe.TBEState);
- }
-
- Entry cache_entry := getCacheEntry(addr);
- if(is_valid(cache_entry)) {
- return L1Cache_State_to_permission(cache_entry.CacheState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Entry cache_entry, Addr addr, State state) {
- if (is_valid(cache_entry)) {
- cache_entry.changePermission(L1Cache_State_to_permission(state));
- }
- }
-
- Event mandatory_request_type_to_event(RubyRequestType type) {
- if (type == RubyRequestType:LD) {
- return Event:Load;
- } else if (type == RubyRequestType:IFETCH) {
- return Event:Ifetch;
- } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
- return Event:Store;
- } else if ((type == RubyRequestType:FLUSH)) {
- return Event:Flush_line;
- } else {
- error("Invalid RubyRequestType");
- }
- }
-
- MachineType testAndClearLocalHit(Entry cache_entry) {
- if (is_valid(cache_entry) && cache_entry.FromL2) {
- cache_entry.FromL2 := false;
- return MachineType:L2Cache;
- }
- return MachineType:L1Cache;
- }
-
- bool IsAtomicAccessed(Entry cache_entry) {
- assert(is_valid(cache_entry));
- return cache_entry.AtomicAccessed;
- }
-
- // ** OUT_PORTS **
- out_port(requestNetwork_out, RequestMsg, requestFromCache);
- out_port(responseNetwork_out, ResponseMsg, responseFromCache);
- out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- // ** IN_PORTS **
-
- // Trigger Queue
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if (in_msg.Type == TriggerType:L2_to_L1) {
- trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
- trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Nothing from the unblock network
-
- // Response Network
- in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
- if (responseToCache_in.isReady(clockEdge())) {
- peek(responseToCache_in, ResponseMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
- trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Forward Network
- in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
- if (forwardToCache_in.isReady(clockEdge())) {
- peek(forwardToCache_in, RequestMsg, block_on="addr") {
-
- Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
-
- if ((in_msg.Type == CoherenceRequestType:GETX) ||
- (in_msg.Type == CoherenceRequestType:GETF)) {
- trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
- trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:GETS) {
- if (machineCount(MachineType:L1Cache) > 1) {
- if (is_valid(cache_entry)) {
- if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
- trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
- } else {
- trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
- }
- } else {
- trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
- }
- } else {
- trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
- }
- } else if (in_msg.Type == CoherenceRequestType:INV) {
- trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
- trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
- trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
- trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // Nothing from the request network
-
- // Mandatory Queue
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady(clockEdge())) {
- peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
-
- // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
- TBE tbe := TBEs[in_msg.LineAddress];
-
- if (in_msg.Type == RubyRequestType:IFETCH) {
- // ** INSTRUCTION ACCESS ***
-
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line.
- // We know it can't be in the L2 due to exclusion
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Icache_entry, tbe);
- } else {
- // Check to see if it is in the OTHER L1
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
- // The block is in the wrong L1, try to write it to the L2
- if (L2cache.cacheAvail(in_msg.LineAddress)) {
- trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
- } else {
- Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Replacement,
- l2_victim_addr,
- getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
- }
- }
-
- if (L1Icache.cacheAvail(in_msg.LineAddress)) {
- // L1 doesn't have the line, but we have space for it in the L1
-
- Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
- if (is_valid(L2cache_entry)) {
- // L2 has it (maybe not with the right permissions)
- trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
- L2cache_entry, tbe);
- } else {
- // We have room, the L2 doesn't have it, so the L1 fetches the line
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Icache_entry, tbe);
- }
- } else {
- // No room in the L1, so we need to make room
- // Check if the line we want to evict is not locked
- Addr l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, l1i_victim_addr);
- if (L2cache.cacheAvail(l1i_victim_addr)) {
- // The L2 has room, so we move the line from the L1 to the L2
- trigger(Event:L1_to_L2,
- l1i_victim_addr,
- getL1ICacheEntry(l1i_victim_addr),
- TBEs[l1i_victim_addr]);
- } else {
- Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
- // The L2 does not have room, so we replace a line from the L2
- trigger(Event:L2_Replacement,
- l2_victim_addr,
- getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
- }
- }
- }
- } else {
- // *** DATA ACCESS ***
-
- Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
- if (is_valid(L1Dcache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line.
- // We know it can't be in the L2 due to exclusion
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Dcache_entry, tbe);
- } else {
-
- // Check to see if it is in the OTHER L1
- Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
- if (is_valid(L1Icache_entry)) {
- // The block is in the wrong L1, try to write it to the L2
- if (L2cache.cacheAvail(in_msg.LineAddress)) {
- trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
- } else {
- Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
- trigger(Event:L2_Replacement,
- l2_victim_addr,
- getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
- }
- }
-
- if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
- // L1 doesn't have the line, but we have space for it in the L1
- Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
- if (is_valid(L2cache_entry)) {
- // L2 has it (maybe not with the right permissions)
- trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
- L2cache_entry, tbe);
- } else {
- // We have room, the L2 doesn't have it, so the L1 fetches the line
- trigger(mandatory_request_type_to_event(in_msg.Type),
- in_msg.LineAddress, L1Dcache_entry, tbe);
- }
- } else {
- // No room in the L1, so we need to make room
- // Check if the line we want to evict is not locked
- Addr l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
- check_on_cache_probe(mandatoryQueue_in, l1d_victim_addr);
- if (L2cache.cacheAvail(l1d_victim_addr)) {
- // The L2 has room, so we move the line from the L1 to the L2
- trigger(Event:L1_to_L2,
- l1d_victim_addr,
- getL1DCacheEntry(l1d_victim_addr),
- TBEs[l1d_victim_addr]);
- } else {
- Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
- // The L2 does not have room, so we replace a line from the L2
- trigger(Event:L2_Replacement,
- l2_victim_addr,
- getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
- }
- }
- }
- }
- }
- }
- }
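-
- // Worked example of the cascade above, with illustrative names: a load
- // to line A misses in the L1D. If A happens to sit in the L1I, it is
- // first pushed toward the L2 (Event:L1_to_L2), evicting an L2 victim
- // beforehand if that set is full. If the L1D has no free way for A, a
- // victim V := L1Dcache.cacheProbe(A) is chosen and migrated to the L2
- // (again forcing an L2_Replacement first when necessary) before the
- // miss for A is finally issued. Ifetches follow the same sequence with
- // the I- and D-cache roles swapped.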
-
- // ACTIONS
-
- action(a_issueGETS, "a", desc="Issue GETS") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
-
- // One from each other cache (n-1) plus the memory (+1)
- tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
- }
- }
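-
- // Example of the accounting above: with four L1 caches,
- // machineCount(MachineType:L1Cache) == 4, so the requestor waits for
- // four messages in total -- a response from each of the three peer
- // caches plus one on behalf of memory, i.e. the (n-1)+1 in the comment.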
-
- action(b_issueGETX, "b", desc="Issue GETX") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
-
- // One from each other cache (n-1) plus the memory (+1)
- tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
- }
- }
-
- action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX if more than one cache") {
- if (machineCount(MachineType:L1Cache) > 1) {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
- }
- }
-
- // One from each other cache (n-1) plus the memory (+1)
- tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
- }
-
- action(bf_issueGETF, "bf", desc="Issue GETF") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETF;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := curCycle();
-
- // One from each other cache (n-1) plus the memory (+1)
- tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
- }
- }
-
- action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
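-
- // Ack arithmetic in c_sendExclusiveData above (and its e_/ee_/q_
- // siblings below): a DirectedProbe reaches only this cache, so the one
- // reply must carry the full machineCount(L1Cache) worth of acks by
- // itself; on a broadcast probe every peer answers, with a data reply
- // counting for two (plausibly standing in for the memory response as
- // well) and a plain f_sendAck counting for one, which together satisfy
- // the (n-1)+1 count set when the request was issued.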
-
- action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(d_issuePUT, "d", desc="Issue PUT") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUT;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(df_issuePUTF, "df", desc="Issue PUTF") {
- enqueue(requestNetwork_out, RequestMsg, issue_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:PUTF;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(e_sendData, "e", desc="Send data from cache to requestor") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, remaining the owner") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination := in_msg.MergedRequestors;
- out_msg.DataBlk := cache_entry.DataBlk;
- out_msg.Dirty := cache_entry.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
- out_msg.Acks := machineCount(MachineType:L1Cache);
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination := in_msg.MergedRequestors;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
- out_msg.Acks := machineCount(MachineType:L1Cache);
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(f_sendAck, "f", desc="Send ack from cache to requestor") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Acks := 1;
- out_msg.SilentAcks := in_msg.SilentAcks;
- assert(in_msg.DirectedProbe == false);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Acks := 1;
- out_msg.SilentAcks := in_msg.SilentAcks;
- assert(in_msg.DirectedProbe == false);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(g_sendUnblock, "g", desc="Send unblock to memory") {
- enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
- action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
- enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCKM;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
- action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
- enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:UNBLOCKS;
- out_msg.Sender := machineID;
- out_msg.CurOwner := tbe.CurOwner;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Unblock_Control;
- }
- }
-
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- testAndClearLocalHit(cache_entry));
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- testAndClearLocalHit(cache_entry));
- }
-
- action(hx_external_load_hit, "hx", desc="load required external msgs") {
- assert(is_valid(cache_entry));
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.readCallback(address, cache_entry.DataBlk, true,
- machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
- tbe.ForwardRequestTime, tbe.FirstResponseTime);
- }
- }
-
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- peek(mandatoryQueue_in, RubyRequest) {
- L1Dcache.setMRU(cache_entry);
- sequencer.writeCallback(address, cache_entry.DataBlk, false,
- testAndClearLocalHit(cache_entry));
-
- cache_entry.Dirty := true;
- if (in_msg.Type == RubyRequestType:ATOMIC) {
- cache_entry.AtomicAccessed := true;
- }
- }
- }
-
- action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
- sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
- }
-
- action(sx_external_store_hit, "sx", desc="store required external msgs.") {
- assert(is_valid(cache_entry));
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.writeCallback(address, cache_entry.DataBlk, true,
- machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
- tbe.ForwardRequestTime, tbe.FirstResponseTime);
- }
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- cache_entry.Dirty := true;
- }
-
- action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
- assert(is_valid(cache_entry));
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
- sequencer.writeCallback(address, cache_entry.DataBlk, true,
- machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
- tbe.ForwardRequestTime, tbe.FirstResponseTime);
-
- cache_entry.Dirty := true;
- }
-
- action(i_allocateTBE, "i", desc="Allocate TBE") {
- check_allocate(TBEs);
- assert(is_valid(cache_entry));
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
- tbe.Dirty := cache_entry.Dirty;
- tbe.Sharers := false;
- }
-
- action(it_allocateTBE, "it", desc="Allocate TBE") {
- check_allocate(TBEs);
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.Dirty := false;
- tbe.Sharers := false;
- }
-
- action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue(clockEdge());
- }
-
- action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
- forwardToCache_in.dequeue(clockEdge());
- }
-
- action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
- assert(is_valid(cache_entry));
- assert(is_valid(tbe));
- cache_entry.Dirty := tbe.Dirty;
- cache_entry.DataBlk := tbe.DataBlk;
- }
-
- action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
- assert(is_valid(cache_entry));
- assert(is_valid(tbe));
- cache_entry.Dirty := tbe.Dirty;
- cache_entry.DataBlk := tbe.DataBlk;
- cache_entry.FromL2 := true;
- }
-
- action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
- peek(responseToCache_in, ResponseMsg) {
- assert(in_msg.Acks >= 0);
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
- DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
- if (tbe.AppliedSilentAcks == false) {
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
- tbe.AppliedSilentAcks := true;
- }
- DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
- DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
- APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- tbe.LastResponder := in_msg.Sender;
- if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
- assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
- }
- if (in_msg.InitialRequestTime != zero_time()) {
- tbe.InitialRequestTime := in_msg.InitialRequestTime;
- }
- if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
- assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
- }
- if (in_msg.ForwardRequestTime != zero_time()) {
- tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- if (tbe.FirstResponseTime == zero_time()) {
- tbe.FirstResponseTime := curCycle();
- }
- }
- }
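-
- // Illustrative count-down (values assumed): with tbe.NumPendingMsgs ==
- // 4, a first response carrying SilentAcks == 2 and Acks == 1 drops the
- // count to 4 - 2 - 1 = 1 (the silent acks are applied exactly once);
- // one more single-ack response reaches 0, and o_checkForCompletion
- // below then fires the ALL_ACKS / ALL_ACKS_NO_SHARERS trigger.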
-
- action(uo_updateCurrentOwner, "uo", desc="When moving to SS, update the current owner.") {
- peek(responseToCache_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.CurOwner := in_msg.Sender;
- }
- }
-
- action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToCache_in.dequeue(clockEdge());
- }
-
- action(ll_L2toL1Transfer, "ll", desc="Schedule completion of an L2-to-L1 transfer") {
- enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:L2_to_L1;
- }
- }
-
- action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- if (tbe.Sharers) {
- out_msg.Type := TriggerType:ALL_ACKS;
- } else {
- out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
- }
- }
- }
- }
-
- action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
- assert(is_valid(tbe));
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
- }
-
- action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
- assert(is_valid(tbe));
- tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
- }
-
- action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
- peek(forwardToCache_in, RequestMsg) {
- assert(in_msg.Requestor != machineID);
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
- peek(forwardToCache_in, RequestMsg) {
- assert(in_msg.Requestor != machineID);
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (in_msg.DirectedProbe) {
- out_msg.Acks := machineCount(MachineType:L1Cache);
- } else {
- out_msg.Acks := 2;
- }
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
- peek(forwardToCache_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:DATA_SHARED;
- out_msg.Sender := machineID;
- out_msg.Destination := in_msg.MergedRequestors;
- DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- out_msg.Acks := machineCount(MachineType:L1Cache);
- out_msg.SilentAcks := in_msg.SilentAcks;
- out_msg.MessageSize := MessageSizeType:Response_Data;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
- }
- }
- }
-
- action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
- enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Dirty) {
- out_msg.Type := CoherenceResponseType:WB_DIRTY;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.Type := CoherenceResponseType:WB_CLEAN;
- // NOTE: in a real system this would not send data. We send
- // data here only so we can check it at the memory
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(r_setSharerBit, "r", desc="We saw other sharers") {
- assert(is_valid(tbe));
- tbe.Sharers := true;
- }
-
- action(s_deallocateTBE, "s", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
- enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Sender := machineID;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Dirty := tbe.Dirty;
- if (tbe.Dirty) {
- out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Data;
- } else {
- out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
- // NOTE: in a real system this would not send data. We send
- // data here only so we can check it at the memory
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(u_writeDataToCache, "u", desc="Write data to cache") {
- peek(responseToCache_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty;
- }
- }
-
- action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
- peek(responseToCache_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty;
- }
- }
-
- action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
- peek(responseToCache_in, ResponseMsg) {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
- cache_entry.DataBlk, in_msg.DataBlk);
- assert(cache_entry.DataBlk == in_msg.DataBlk);
- cache_entry.DataBlk := in_msg.DataBlk;
- cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
- }
- }
-
- action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
- peek(responseToCache_in, ResponseMsg) {
- assert(is_valid(tbe));
- DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
- tbe.DataBlk, in_msg.DataBlk);
- assert(tbe.DataBlk == in_msg.DataBlk);
- tbe.DataBlk := in_msg.DataBlk;
- tbe.Dirty := in_msg.Dirty || tbe.Dirty;
- }
- }
-
- action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
- if (L1Dcache.isTagPresent(address)) {
- L1Dcache.deallocate(address);
- } else {
- L1Icache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1Dcache.allocate(address, new Entry));
- }
- }
-
- action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
- if (is_invalid(cache_entry)) {
- set_cache_entry(L1Icache.allocate(address, new Entry));
- }
- }
-
- action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
- set_cache_entry(L2cache.allocate(address, new Entry));
- }
-
- action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cache.deallocate(address);
- unset_cache_entry();
- }
-
- action(gr_deallocateCacheBlock, "\gr", desc="Deallocate an L1 or L2 cache block.") {
- if (L1Dcache.isTagPresent(address)) {
- L1Dcache.deallocate(address);
- }
- else if (L1Icache.isTagPresent(address)){
- L1Icache.deallocate(address);
- }
- else {
- assert(L2cache.isTagPresent(address));
- L2cache.deallocate(address);
- }
- unset_cache_entry();
- }
-
- action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
- if (send_evictions) {
- DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
- sequencer.evictionCallback(address);
- }
- }
-
- action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
- ++L1Dcache.demand_misses;
- }
-
- action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
- ++L1Dcache.demand_hits;
- }
-
- action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
- ++L1Icache.demand_misses;
- }
-
- action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
- ++L1Icache.demand_hits;
- }
-
- action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
- ++L2cache.demand_misses;
- }
-
- action(uu_profileL2Hit, "\uh", desc="Profile the demand hits ") {
- ++L2cache.demand_hits;
- }
-
- action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- stall_and_wait(mandatoryQueue_in, address);
- }
-
- action(z_stall, "z", desc="stall") {
- // do nothing and the special z_stall action will return a protocol stall
- // so that the next port is checked
- }
-
- action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
- wakeUpBuffers(address);
- }
-
- action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
- //*****************************************************
- // TRANSITIONS
- //*****************************************************
-
- // Transitions for Load/Store/L2_Replacement from transient states
- transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({IM, IS, OI, MI, II, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({MI_F, MM_F}, {Store}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({MM_F, MI_F}, {Flush_line}) {
- zz_stallAndWaitMandatoryQueue;
- }
-
- transition({ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
- z_stall;
- }
-
- transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
- z_stall;
- }
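-
- // A block in a transfer (xT) or locked (xR) state is mid-flight between
- // the L2 and an L1, so forwarded external requests cannot be answered
- // coherently yet; z_stall simply leaves them queued. For example, a
- // block in MT holds off an Other_GETX until Complete_L2_to_L1 moves it
- // to MR, and the pending local access then returns it to the base M
- // state.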
-
- // Transitions moving data between the L1 and L2 caches
- transition({S, O, M, MM}, L1_to_L2) {
- i_allocateTBE;
- gg_deallocateL1CacheBlock;
- vv_allocateL2CacheBlock;
- hp_copyFromTBEToL2;
- s_deallocateTBE;
- }
-
- transition(S, Trigger_L2_to_L1D, ST) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- ii_allocateL1DCacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(O, Trigger_L2_to_L1D, OT) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- ii_allocateL1DCacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(M, Trigger_L2_to_L1D, MT) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- ii_allocateL1DCacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(MM, Trigger_L2_to_L1D, MMT) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- ii_allocateL1DCacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(S, Trigger_L2_to_L1I, ST) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- jj_allocateL1ICacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(O, Trigger_L2_to_L1I, OT) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- jj_allocateL1ICacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(M, Trigger_L2_to_L1I, MT) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- jj_allocateL1ICacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(MM, Trigger_L2_to_L1I, MMT) {
- i_allocateTBE;
- rr_deallocateL2CacheBlock;
- jj_allocateL1ICacheBlock;
- nb_copyFromTBEToL1;
- s_deallocateTBE;
- zz_stallAndWaitMandatoryQueue;
- ll_L2toL1Transfer;
- }
-
- transition(ST, Complete_L2_to_L1, SR) {
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition(OT, Complete_L2_to_L1, OR) {
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition(MT, Complete_L2_to_L1, MR) {
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition(MMT, Complete_L2_to_L1, MMR) {
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
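-
- // End-to-end, an L2-to-L1 transfer of a shared block runs:
- // S --Trigger_L2_to_L1D--> ST (data moved through a short-lived TBE
- // into a freshly allocated L1D entry, the requesting access stalled)
- // --Complete_L2_to_L1--> SR (locked, ready to service the mandatory
- // queue) --Load--> S, at which point the hit is delivered and waiting
- // requests are woken. O, M, and MM follow the same shape through
- // OT/MT/MMT and OR/MR/MMR.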
-
- // Transitions from Idle
- transition({I,IR}, Load, IS) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileL1DataMiss;
- uu_profileL2Miss;
- k_popMandatoryQueue;
- }
-
- transition({I,IR}, Ifetch, IS) {
- jj_allocateL1ICacheBlock;
- i_allocateTBE;
- a_issueGETS;
- uu_profileL1InstMiss;
- uu_profileL2Miss;
- k_popMandatoryQueue;
- }
-
- transition({I,IR}, Store, IM) {
- ii_allocateL1DCacheBlock;
- i_allocateTBE;
- b_issueGETX;
- uu_profileL1DataMiss;
- uu_profileL2Miss;
- k_popMandatoryQueue;
- }
-
- transition({I, IR}, Flush_line, IM_F) {
- it_allocateTBE;
- bf_issueGETF;
- k_popMandatoryQueue;
- }
-
- transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- // Transitions from Shared
- transition({S, SM, ISM}, Load) {
- h_load_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition({S, SM, ISM}, Ifetch) {
- h_ifetch_hit;
- uu_profileL1InstHit;
- k_popMandatoryQueue;
- }
-
- transition(SR, Load, S) {
- h_load_hit;
- uu_profileL1DataMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(SR, Ifetch, S) {
- h_ifetch_hit;
- uu_profileL1InstMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition({S,SR}, Store, SM) {
- i_allocateTBE;
- b_issueGETX;
- uu_profileL1DataMiss;
- uu_profileL2Miss;
- k_popMandatoryQueue;
- }
-
- transition({S, SR}, Flush_line, SM_F) {
- i_allocateTBE;
- bf_issueGETF;
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- k_popMandatoryQueue;
- }
-
- transition(S, L2_Replacement, I) {
- forward_eviction_to_cpu;
- rr_deallocateL2CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(S, {Other_GETX, Invalidate}, I) {
- f_sendAck;
- forward_eviction_to_cpu;
- gr_deallocateCacheBlock;
- l_popForwardQueue;
- }
-
- transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
- ff_sendAckShared;
- l_popForwardQueue;
- }
-
- // Transitions from Owned
- transition({O, OM, SS, MM_W, M_W}, {Load}) {
- h_load_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
- h_ifetch_hit;
- uu_profileL1InstHit;
- k_popMandatoryQueue;
- }
-
- transition(OR, Load, O) {
- h_load_hit;
- uu_profileL1DataMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(OR, Ifetch, O) {
- h_ifetch_hit;
- uu_profileL1InstMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition({O,OR}, Store, OM) {
- i_allocateTBE;
- b_issueGETX;
- p_decrementNumberOfMessagesByOne;
- uu_profileL1DataMiss;
- uu_profileL2Miss;
- k_popMandatoryQueue;
- }
-
- transition({O, OR}, Flush_line, OM_F) {
- i_allocateTBE;
- bf_issueGETF;
- p_decrementNumberOfMessagesByOne;
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- k_popMandatoryQueue;
- }
-
- transition(O, L2_Replacement, OI) {
- i_allocateTBE;
- d_issuePUT;
- forward_eviction_to_cpu;
- rr_deallocateL2CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(O, {Other_GETX, Invalidate}, I) {
- e_sendData;
- forward_eviction_to_cpu;
- gr_deallocateCacheBlock;
- l_popForwardQueue;
- }
-
- transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
- ee_sendDataShared;
- l_popForwardQueue;
- }
-
- transition(O, Merged_GETS) {
- em_sendDataSharedMultiple;
- l_popForwardQueue;
- }
-
- // Transitions from Modified
- transition({MM, M}, {Ifetch}) {
- h_ifetch_hit;
- uu_profileL1InstHit;
- k_popMandatoryQueue;
- }
-
- transition({MM, M}, {Load}) {
- h_load_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition(MM, Store) {
- hh_store_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition(MMR, Load, MM) {
- h_load_hit;
- uu_profileL1DataMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(MMR, Ifetch, MM) {
- h_ifetch_hit;
- uu_profileL1InstMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(MMR, Store, MM) {
- hh_store_hit;
- uu_profileL1DataMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition({MM, M, MMR, MR}, Flush_line, MM_F) {
- i_allocateTBE;
- bf_issueGETF;
- p_decrementNumberOfMessagesByOne;
- forward_eviction_to_cpu;
- gg_deallocateL1CacheBlock;
- k_popMandatoryQueue;
- }
-
- transition(MM_F, Block_Ack, MI_F) {
- df_issuePUTF;
- l_popForwardQueue;
- kd_wakeUpDependents;
- }
-
- transition(MM, L2_Replacement, MI) {
- i_allocateTBE;
- d_issuePUT;
- forward_eviction_to_cpu;
- rr_deallocateL2CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(MM, {Other_GETX, Invalidate}, I) {
- c_sendExclusiveData;
- forward_eviction_to_cpu;
- gr_deallocateCacheBlock;
- l_popForwardQueue;
- }
-
- transition(MM, Other_GETS, I) {
- c_sendExclusiveData;
- forward_eviction_to_cpu;
- gr_deallocateCacheBlock;
- l_popForwardQueue;
- }
-
- transition(MM, NC_DMA_GETS, O) {
- ee_sendDataShared;
- l_popForwardQueue;
- }
-
- transition(MM, Other_GETS_No_Mig, O) {
- ee_sendDataShared;
- l_popForwardQueue;
- }
-
- transition(MM, Merged_GETS, O) {
- em_sendDataSharedMultiple;
- l_popForwardQueue;
- }
-
- // Transitions from Dirty Exclusive
- transition(M, Store, MM) {
- hh_store_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition(MR, Load, M) {
- h_load_hit;
- uu_profileL1DataMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(MR, Ifetch, M) {
- h_ifetch_hit;
- uu_profileL1InstMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(MR, Store, MM) {
- hh_store_hit;
- uu_profileL1DataMiss;
- uu_profileL2Hit;
- k_popMandatoryQueue;
- ka_wakeUpAllDependents;
- }
-
- transition(M, L2_Replacement, MI) {
- i_allocateTBE;
- d_issuePUT;
- forward_eviction_to_cpu;
- rr_deallocateL2CacheBlock;
- ka_wakeUpAllDependents;
- }
-
- transition(M, {Other_GETX, Invalidate}, I) {
- c_sendExclusiveData;
- forward_eviction_to_cpu;
- gr_deallocateCacheBlock;
- l_popForwardQueue;
- }
-
- transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
- ee_sendDataShared;
- l_popForwardQueue;
- }
-
- transition(M, NC_DMA_GETS, O) {
- ee_sendDataShared;
- l_popForwardQueue;
- }
-
- transition(M, Merged_GETS, O) {
- em_sendDataSharedMultiple;
- l_popForwardQueue;
- }
-
- // Transitions from IM
-
- transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition({IM, IM_F, MM_F}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(IM, Data, ISM) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(IM_F, Data, ISM_F) {
- uf_writeDataToCacheTBE;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(IM, Exclusive_Data, MM_W) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- sx_external_store_hit;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IM_F, Exclusive_Data, MM_WF) {
- uf_writeDataToCacheTBE;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- // Transitions from SM
- transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
- ff_sendAckShared;
- l_popForwardQueue;
- }
-
- transition(SM, {Other_GETX, Invalidate}, IM) {
- f_sendAck;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
- f_sendAck;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition({SM, SM_F}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(SM, {Data, Exclusive_Data}, ISM) {
- v_writeDataToCacheVerify;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
- vt_writeDataToTBEVerify;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- // Transitions from ISM
- transition({ISM, ISM_F}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(ISM, All_acks_no_sharers, MM) {
- sxt_trig_ext_store_hit;
- gm_sendUnblockM;
- s_deallocateTBE;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition(ISM_F, All_acks_no_sharers, MI_F) {
- df_issuePUTF;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from OM
-
- transition(OM, {Other_GETX, Invalidate}, IM) {
- e_sendData;
- pp_incrementNumberOfMessagesByOne;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
- q_sendDataFromTBEToCache;
- pp_incrementNumberOfMessagesByOne;
- forward_eviction_to_cpu;
- l_popForwardQueue;
- }
-
- transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
- ee_sendDataShared;
- l_popForwardQueue;
- }
-
- transition(OM, Merged_GETS) {
- em_sendDataSharedMultiple;
- l_popForwardQueue;
- }
-
- transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
- et_sendDataSharedFromTBE;
- l_popForwardQueue;
- }
-
- transition(OM_F, Merged_GETS) {
- emt_sendDataSharedMultipleFromTBE;
- l_popForwardQueue;
- }
-
- transition({OM, OM_F}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(OM, {All_acks, All_acks_no_sharers}, MM) {
- sxt_trig_ext_store_hit;
- gm_sendUnblockM;
- s_deallocateTBE;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
- df_issuePUTF;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from IS
-
- transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition(IS, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(IS, Shared_Ack) {
- m_decrementNumberOfMessages;
- r_setSharerBit;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(IS, Data, SS) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- hx_external_load_hit;
- uo_updateCurrentOwner;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Exclusive_Data, M_W) {
- u_writeDataToCache;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- hx_external_load_hit;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- transition(IS, Shared_Data, SS) {
- u_writeDataToCache;
- r_setSharerBit;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- hx_external_load_hit;
- uo_updateCurrentOwner;
- n_popResponseQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from SS
-
- transition(SS, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(SS, Shared_Ack) {
- m_decrementNumberOfMessages;
- r_setSharerBit;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(SS, All_acks, S) {
- gs_sendUnblockS;
- s_deallocateTBE;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition(SS, All_acks_no_sharers, S) {
- // Note: The directory might still be the owner, so that is why we go to S
- gs_sendUnblockS;
- s_deallocateTBE;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from MM_W
-
- transition(MM_W, Store) {
- hh_store_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition({MM_W, MM_WF}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(MM_W, All_acks_no_sharers, MM) {
- gm_sendUnblockM;
- s_deallocateTBE;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- transition(MM_WF, All_acks_no_sharers, MI_F) {
- df_issuePUTF;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from M_W
-
- transition(M_W, Store, MM_W) {
- hh_store_hit;
- uu_profileL1DataHit;
- k_popMandatoryQueue;
- }
-
- transition(M_W, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(M_W, All_acks_no_sharers, M) {
- gm_sendUnblockM;
- s_deallocateTBE;
- j_popTriggerQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from OI/MI
-
- transition({OI, MI}, {Other_GETX, Invalidate}, II) {
- q_sendDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
- sq_sendSharedDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition({OI, MI}, Merged_GETS, OI) {
- qm_sendDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition(MI, Writeback_Ack, I) {
- t_sendExclusiveDataFromTBEToMemory;
- s_deallocateTBE;
- l_popForwardQueue;
- kd_wakeUpDependents;
- }
-
- transition(MI_F, Writeback_Ack, I) {
- hh_flush_hit;
- t_sendExclusiveDataFromTBEToMemory;
- s_deallocateTBE;
- l_popForwardQueue;
- kd_wakeUpDependents;
- }
-
- transition(OI, Writeback_Ack, I) {
- qq_sendDataFromTBEToMemory;
- s_deallocateTBE;
- l_popForwardQueue;
- kd_wakeUpDependents;
- }
-
- // Transitions from II
- transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
- f_sendAck;
- l_popForwardQueue;
- }
-
- transition(II, Writeback_Ack, I) {
- g_sendUnblock;
- s_deallocateTBE;
- l_popForwardQueue;
- kd_wakeUpDependents;
- }
-
- transition(II, Writeback_Nack, I) {
- s_deallocateTBE;
- l_popForwardQueue;
- kd_wakeUpDependents;
- }
-
- transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
- ct_sendExclusiveDataFromTBE;
- pp_incrementNumberOfMessagesByOne;
- l_popForwardQueue;
- }
-
- transition(MM_F, Other_GETS, IM_F) {
- ct_sendExclusiveDataFromTBE;
- pp_incrementNumberOfMessagesByOne;
- l_popForwardQueue;
- }
-
- transition(MM_F, NC_DMA_GETS, OM_F) {
- sq_sendSharedDataFromTBEToCache;
- l_popForwardQueue;
- }
-
- transition(MM_F, Other_GETS_No_Mig, OM_F) {
- et_sendDataSharedFromTBE;
- l_popForwardQueue;
- }
-
- transition(MM_F, Merged_GETS, OM_F) {
- emt_sendDataSharedMultipleFromTBE;
- l_popForwardQueue;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * Copyright (c) 2009 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * AMD's contributions to the MOESI hammer protocol do not constitute an
- * endorsement of its similarity to any AMD products.
- *
- * Authors: Milo Martin
- * Brad Beckmann
- */
-
-machine(MachineType:Directory, "AMD Hammer-like protocol")
- : DirectoryMemory * directory;
- CacheMemory * probeFilter;
- Cycles from_memory_controller_latency := 2;
- Cycles to_memory_controller_latency := 1;
- bool probe_filter_enabled := "False";
- bool full_bit_dir_enabled := "False";
-
- MessageBuffer * forwardFromDir, network="To", virtual_network="3",
- vnet_type="forward";
-
- MessageBuffer * responseFromDir, network="To", virtual_network="4",
- vnet_type="response";
-
-  // For a finite buffered network, note that the DMA response network only
-  // works at this relatively low-numbered (lower priority) virtual network
-  // because the trigger queue decouples cache responses from DMA responses.
- MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
- vnet_type="response";
-
- MessageBuffer * unblockToDir, network="From", virtual_network="5",
- vnet_type="unblock";
-
- MessageBuffer * responseToDir, network="From", virtual_network="4",
- vnet_type="response";
-
- MessageBuffer * requestToDir, network="From", virtual_network="2",
- vnet_type="request";
-
- MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
- vnet_type="request";
-
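-  // Internal (non-network) queues: self-scheduled trigger events and
-  // responses from the attached memory controller.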
- MessageBuffer * triggerQueue;
- MessageBuffer * responseFromMemory;
-{
- // STATES
- state_declaration(State, desc="Directory states", default="Directory_State_E") {
- // Base states
- NX, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in O at Owner";
- NO, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
- S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
- O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
- E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
-
- O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
- S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
- NO_R, AccessPermission:Busy, desc="Was Not Owner or Sharer, replacing probe filter entry";
-
- NO_B, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked";
- NO_B_X, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
- NO_B_S, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
- NO_B_S_W, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
- O_B, AccessPermission:Busy, "O^B", desc="Owner, Blocked";
- NO_B_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
- O_B_W, AccessPermission:Busy, desc="Owner, Blocked, waiting for Dram";
- NO_W, AccessPermission:Busy, desc="Not Owner, waiting for Dram";
- O_W, AccessPermission:Busy, desc="Owner, waiting for Dram";
- NO_DW_B_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram and cache responses";
- NO_DR_B_W, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for Dram and cache responses";
- NO_DR_B_D, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
- NO_DR_B, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses";
- NO_DW_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram";
- O_DR_B_W, AccessPermission:Busy, desc="Owner, Dma Read waiting for Dram and cache responses";
- O_DR_B, AccessPermission:Busy, desc="Owner, Dma Read waiting for cache responses";
- WB, AccessPermission:Busy, desc="Blocked on a writeback";
- WB_O_W, AccessPermission:Busy, desc="Blocked on memory write, will go to O";
- WB_E_W, AccessPermission:Busy, desc="Blocked on memory write, will go to E";
-
- NO_F, AccessPermission:Busy, desc="Blocked on a flush";
- NO_F_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- GETX, desc="A GETX arrives";
- GETS, desc="A GETS arrives";
- PUT, desc="A PUT arrives";
- Unblock, desc="An unblock message arrives";
-    UnblockS, desc="An unblockS message arrives, the unblocker is a sharer";
-    UnblockM, desc="An unblockM message arrives, the unblocker is the owner";
- Writeback_Clean, desc="The final part of a PutX (no data)";
- Writeback_Dirty, desc="The final part of a PutX (data)";
- Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
- Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
-
- // Probe filter
- Pf_Replacement, desc="probe filter replacement";
-
- // DMA requests
- DMA_READ, desc="A DMA Read memory request";
- DMA_WRITE, desc="A DMA Write memory request";
-
- // Memory Controller
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
-
- // Cache responses required to handle DMA
- Ack, desc="Received an ack message";
- Shared_Ack, desc="Received an ack message, responder has a shared copy";
- Shared_Data, desc="Received a data message, responder has a shared copy";
-    Data, desc="Received a data message, responder had an owned or exclusive copy, they gave it to us";
- Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
-
- // Triggers
- All_acks_and_shared_data, desc="Received shared data and message acks";
-    All_acks_and_owner_data, desc="Received owner data and message acks";
- All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
- All_Unblocks, desc="Received all unblocks for a merged gets request";
-
-    // Flush requests
-    GETF, desc="A GETF arrives";
-    PUTF, desc="A PUTF arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...", interface="AbstractEntry") {
- State DirectoryState, desc="Directory state";
- }
-
- // ProbeFilterEntry
- structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
-    State PfState, desc="Probe filter state";
- MachineID Owner, desc="Owner node";
- Set Sharers, desc="sharing vector for full bit directory";
- }
-
- // TBE entries for DMA requests
- structure(TBE, desc="TBE entries for outstanding DMA requests") {
- Addr PhysicalAddress, desc="physical address";
- State TBEState, desc="Transient State";
- CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
- int Acks, default="0", desc="The number of acks that the waiting response represents";
- int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
- DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to merged with system memory";
- DataBlock DataBlk, desc="The current view of system memory";
- int Len, desc="...";
- MachineID DmaRequestor, desc="DMA requestor";
- NetDest GetSRequestors, desc="GETS merged requestors";
- int NumPendingMsgs, desc="Number of pending acks/messages";
- bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
- bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
-    bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
- }
-
- structure(TBETable, external="yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- Tick clockEdge();
- void set_cache_entry(AbstractCacheEntry b);
- void unset_cache_entry();
- void set_tbe(TBE a);
- void unset_tbe();
- void wakeUpBuffers(Addr a);
- Cycles curCycle();
-
- // ** OBJECTS **
-
- Set fwd_set;
-
- TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
-
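-  // Directory entries are allocated on first touch, so lookups never
-  // fail: a missing entry is created in the default state.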
- Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
-
- if (is_valid(dir_entry)) {
- return dir_entry;
- }
-
- dir_entry := static_cast(Entry, "pointer",
- directory.allocate(addr, new Entry));
- return dir_entry;
- }
-
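-  // With neither the probe filter nor the full-bit directory enabled
-  // there is no entry to return, so OOD (an invalid pointer) is returned.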
- PfEntry getProbeFilterEntry(Addr addr), return_by_pointer="yes" {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
- return pfEntry;
- }
- return OOD;
- }
-
- State getState(TBE tbe, PfEntry pf_entry, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- if (is_valid(pf_entry)) {
- assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
- }
- }
- return getDirectoryEntry(addr).DirectoryState;
- }
- }
-
- void setState(TBE tbe, PfEntry pf_entry, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- if (probe_filter_enabled || full_bit_dir_enabled) {
- if (is_valid(pf_entry)) {
- pf_entry.PfState := state;
- }
- if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
- assert(is_valid(pf_entry));
- }
- if (state == State:E) {
- assert(is_valid(pf_entry) == false);
- }
- }
- if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
- state == State:O) {
- assert(is_valid(tbe) == false);
- }
- getDirectoryEntry(addr).DirectoryState := state;
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- return Directory_State_to_permission(tbe.TBEState);
- }
-
- if(directory.isPresent(addr)) {
- return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
- }
-
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(PfEntry pf_entry, Addr addr, State state) {
- getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- testAndRead(addr, tbe.DataBlk, pkt);
- } else {
- functionalMemoryRead(pkt);
- }
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- int num_functional_writes := 0;
-
- TBE tbe := TBEs[addr];
- if(is_valid(tbe)) {
- num_functional_writes := num_functional_writes +
- testAndWrite(addr, tbe.DataBlk, pkt);
- }
-
- num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
- return num_functional_writes;
- }
-
- Event cache_request_to_event(CoherenceRequestType type) {
- if (type == CoherenceRequestType:GETS) {
- return Event:GETS;
- } else if (type == CoherenceRequestType:GETX) {
- return Event:GETX;
- } else if (type == CoherenceRequestType:GETF) {
- return Event:GETF;
- } else {
- error("Invalid CoherenceRequestType");
- }
- }
-
- // ** OUT_PORTS **
- out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
- out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
- out_port(triggerQueue_out, TriggerMsg, triggerQueue);
-
- // ** IN_PORTS **
-
- // Trigger Queue
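-  // (internally generated completion events: all acks/unblocks received)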
- in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
- if (triggerQueue_in.isReady(clockEdge())) {
- peek(triggerQueue_in, TriggerMsg) {
- PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_acks_and_owner_data, in_msg.addr,
- pf_entry, tbe);
- } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
- trigger(Event:All_acks_and_shared_data, in_msg.addr,
- pf_entry, tbe);
- } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
- trigger(Event:All_acks_and_data_no_sharers, in_msg.addr,
- pf_entry, tbe);
- } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
- trigger(Event:All_Unblocks, in_msg.addr,
- pf_entry, tbe);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
- if (unblockNetwork_in.isReady(clockEdge())) {
- peek(unblockNetwork_in, ResponseMsg) {
- PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
- trigger(Event:UnblockS, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
- trigger(Event:UnblockM, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
- trigger(Event:Writeback_Clean, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
- trigger(Event:Writeback_Dirty, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
- trigger(Event:Writeback_Exclusive_Clean, in_msg.addr,
- pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
- trigger(Event:Writeback_Exclusive_Dirty, in_msg.addr,
- pf_entry, tbe);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // Response Network
- in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
- if (responseToDir_in.isReady(clockEdge())) {
- peek(responseToDir_in, ResponseMsg) {
- PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
- trigger(Event:Shared_Ack, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
- trigger(Event:Shared_Data, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA) {
- trigger(Event:Data, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
- trigger(Event:Exclusive_Data, in_msg.addr, pf_entry, tbe);
- } else {
- error("Unexpected message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
- if (memQueue_in.isReady(clockEdge())) {
- peek(memQueue_in, MemoryMsg) {
- PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, pf_entry, tbe);
- } else {
- DPRINTF(RubySlicc, "%d\n", in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
- if (requestQueue_in.isReady(clockEdge())) {
- peek(requestQueue_in, RequestMsg) {
- PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
- if (in_msg.Type == CoherenceRequestType:PUT) {
- trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:PUTF) {
- trigger(Event:PUTF, in_msg.addr, pf_entry, tbe);
- } else {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- if (is_valid(pf_entry)) {
- trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
- pf_entry, tbe);
- } else {
- if (probeFilter.cacheAvail(in_msg.addr)) {
- trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
- pf_entry, tbe);
- } else {
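-            // No probe filter way is free for this address, so first
-            // evict the victim chosen by cacheProbe(); the pending
-            // request is handled once the replacement completes.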
- trigger(Event:Pf_Replacement,
- probeFilter.cacheProbe(in_msg.addr),
- getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
- TBEs[probeFilter.cacheProbe(in_msg.addr)]);
- }
- }
- } else {
- trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
- pf_entry, tbe);
- }
- }
- }
- }
- }
-
- in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
- TBE tbe := TBEs[in_msg.LineAddress];
- if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
- } else if (in_msg.Type == DMARequestType:WRITE) {
- trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
-  action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- assert(is_valid(cache_entry));
- probeFilter.setMRU(address);
- }
- }
-
- action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- assert(is_valid(cache_entry));
- peek(unblockNetwork_in, ResponseMsg) {
- assert(cache_entry.Owner != in_msg.Sender);
- if (full_bit_dir_enabled) {
- assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
- }
- }
- }
- }
-
- action(uo_updateOwnerIfPf, "uo", desc="update owner") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- assert(is_valid(cache_entry));
- peek(unblockNetwork_in, ResponseMsg) {
- cache_entry.Owner := in_msg.Sender;
- if (full_bit_dir_enabled) {
- cache_entry.Sharers.clear();
- cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
- APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
- DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
- }
- }
- }
- }
-
- action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
- if (full_bit_dir_enabled) {
- assert(probeFilter.isTagPresent(address));
- peek(unblockNetwork_in, ResponseMsg) {
- cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
- }
- }
- }
-
- action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WB_ACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
- peek(requestQueue_in, RequestMsg) {
- if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:BLOCK_ACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
- }
-
- action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:WB_NACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- peek(requestQueue_in, RequestMsg) {
- set_cache_entry(probeFilter.allocate(address, new PfEntry));
- cache_entry.Owner := in_msg.Requestor;
- cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
- }
- }
- }
-
- action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- probeFilter.deallocate(address);
- unset_cache_entry();
- }
- }
-
- action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry") {
- if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
- probeFilter.deallocate(address);
- unset_cache_entry();
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE") {
- check_allocate(TBEs);
- peek(requestQueue_in, RequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.PhysicalAddress := address;
- tbe.ResponseType := CoherenceResponseType:NULL;
- }
- }
-
- action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
- check_allocate(TBEs);
- peek(dmaRequestQueue_in, DMARequestMsg) {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- tbe.DmaDataBlk := in_msg.DataBlk;
- tbe.PhysicalAddress := in_msg.PhysicalAddress;
- tbe.Len := in_msg.Len;
- tbe.DmaRequestor := in_msg.Requestor;
- tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
- //
- // One ack for each last-level cache
- //
- tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
- //
- // Assume initially that the caches store a clean copy and that memory
- // will provide the data
- //
- tbe.CacheDirty := false;
- }
- }
-
- action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
- assert(is_valid(tbe));
- if (full_bit_dir_enabled) {
- assert(is_valid(cache_entry));
- tbe.NumPendingMsgs := cache_entry.Sharers.count();
- } else {
- tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
- }
- }
-
- action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
- assert(is_valid(tbe));
- tbe.NumPendingMsgs := 1;
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
- assert(is_valid(tbe));
- peek(requestQueue_in, RequestMsg) {
- if (full_bit_dir_enabled) {
- assert(is_valid(cache_entry));
- //
-        // If we are using the full-bit directory and no sharers exist beyond
-        // the requestor, then we must set the ack number to all, not one
- //
- fwd_set := cache_entry.Sharers;
- fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
- if (fwd_set.count() > 0) {
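-          // Caches outside fwd_set hold no copy and are never probed;
-          // their acks are "silent" and are credited to the requestor
-          // without any message (minus one for the requestor itself).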
- tbe.Acks := 1;
- tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
- tbe.SilentAcks := tbe.SilentAcks - 1;
- } else {
- tbe.Acks := machineCount(MachineType:L1Cache);
- tbe.SilentAcks := 0;
- }
- } else {
- tbe.Acks := 1;
- }
- }
- }
-
- action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
- assert(is_valid(tbe));
- if (probe_filter_enabled || full_bit_dir_enabled) {
- tbe.Acks := machineCount(MachineType:L1Cache);
- tbe.SilentAcks := 0;
- } else {
- tbe.Acks := 1;
- }
- }
-
- action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
- peek(responseToDir_in, ResponseMsg) {
- assert(is_valid(tbe));
- assert(in_msg.Acks > 0);
- DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
- //
- // Note that cache data responses will have an ack count of 2. However,
- // directory DMA requests must wait for acks from all LLC caches, so
- // only decrement by 1.
- //
- if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
- (in_msg.Type == CoherenceResponseType:DATA) ||
- (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
- } else {
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
- }
- DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
- }
- }
-
-  action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of unblocks for which we're waiting") {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(is_valid(tbe));
- assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
- DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
- tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
- DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
- }
- }
-
- action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToDir_in.dequeue(clockEdge());
- }
-
- action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
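-        // Select the completion trigger based on whether the responses
-        // revealed sharers and/or an owner.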
- if (tbe.Sharers) {
- if (tbe.Owned) {
- out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
- } else {
- out_msg.Type := TriggerType:ALL_ACKS;
- }
- } else {
- out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
- }
- }
- }
- }
-
- action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs == 0) {
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:ALL_UNBLOCKS;
- }
- }
- }
-
- action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
- assert(is_valid(tbe));
- tbe.NumPendingMsgs := tbe.GetSRequestors.count();
- }
-
- action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- assert(is_valid(tbe));
- tbe.NumPendingMsgs := 0;
- }
- }
-
- action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs == 0) {
- assert(probe_filter_enabled || full_bit_dir_enabled);
- enqueue(triggerQueue_out, TriggerMsg) {
- out_msg.addr := address;
- out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
- }
- }
- }
-
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := tbe.ResponseType;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- out_msg.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := tbe.Acks;
- out_msg.SilentAcks := tbe.SilentAcks;
- DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
- assert(out_msg.Acks > 0);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:DATA;
- //
- // we send the entire data block and rely on the dma controller to
- // split it up if need be
- //
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
- peek(triggerQueue_in, TriggerMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:DATA;
- //
- // we send the entire data block and rely on the dma controller to
- // split it up if need be
- //
- out_msg.DataBlk := tbe.DataBlk;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
- assert(is_valid(tbe));
- out_msg.PhysicalAddress := address;
- out_msg.LineAddress := address;
- out_msg.Type := DMAResponseType:ACK;
- out_msg.Destination.add(tbe.DmaRequestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
-
- action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
- peek(requestQueue_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
- }
- }
-
- action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
- peek(requestQueue_in, RequestMsg) {
- assert(is_valid(tbe));
- if (full_bit_dir_enabled) {
- fwd_set := cache_entry.Sharers;
- fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
- if (fwd_set.count() > 0) {
- tbe.ResponseType := CoherenceResponseType:DATA;
- } else {
- tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
- }
- } else {
- tbe.ResponseType := CoherenceResponseType:DATA;
- }
- }
- }
-
- action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
- peek(requestQueue_in, RequestMsg) {
- assert(is_valid(tbe));
- tbe.GetSRequestors.add(in_msg.Requestor);
- }
- }
-
- action(r_setSharerBit, "r", desc="We saw other sharers") {
- assert(is_valid(tbe));
- tbe.Sharers := true;
- }
-
-  action(so_setOwnerBit, "so", desc="We saw the owner and other sharers") {
- assert(is_valid(tbe));
- tbe.Sharers := true;
- tbe.Owned := true;
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestQueue_in, RequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
-  action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request for a DMA read") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
- }
- }
-
- action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
- assert(is_valid(tbe));
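-    // Probes are skipped when only one cache exists or when the probe
-    // filter already accounted for every ack (tbe.Acks was set to the
-    // full machine count, meaning no other cache holds a copy).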
- if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
- if (full_bit_dir_enabled) {
- assert(is_valid(cache_entry));
- peek(requestQueue_in, RequestMsg) {
- fwd_set := cache_entry.Sharers;
- fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
- if (fwd_set.count() > 0) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
- out_msg.MessageSize := MessageSizeType:Multicast_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- assert(tbe.SilentAcks > 0);
- out_msg.SilentAcks := tbe.SilentAcks;
- }
- }
- }
- } else {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
- out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- }
- }
- }
-
- action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
- if (machineCount(MachineType:L1Cache) > 1) {
- if (full_bit_dir_enabled) {
- assert(cache_entry.Sharers.count() > 0);
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
- out_msg.MessageSize := MessageSizeType:Multicast_Control;
- }
- }
- } else {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- }
- }
- }
- }
-
-  action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
- if (machineCount(MachineType:L1Cache) > 1) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := machineID;
- out_msg.Destination.add(cache_entry.Owner);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.DirectedProbe := true;
- }
- }
- }
-
- action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
- if (machineCount(MachineType:L1Cache) > 1) {
- peek(requestQueue_in, RequestMsg) {
- if (full_bit_dir_enabled) {
- fwd_set := cache_entry.Sharers;
- fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
- if (fwd_set.count() > 0) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
- out_msg.MessageSize := MessageSizeType:Multicast_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
- out_msg.SilentAcks := out_msg.SilentAcks - 1;
- }
- }
- } else {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
- out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- }
- } else {
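-      // Only one cache exists, so there is nothing to probe: immediately
-      // send the requestor a zero-ack response instead.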
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, 1) {
- out_msg.addr := address;
- out_msg.Type := CoherenceResponseType:ACK;
- out_msg.Sender := machineID;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := 0;
- out_msg.SilentAcks := 0;
- DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
- out_msg.MessageSize := MessageSizeType:Response_Control;
- }
- }
- }
- }
-
- action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
- assert(machineCount(MachineType:L1Cache) > 1);
- //
- // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
- // decouple the two.
- //
- peek(unblockNetwork_in, ResponseMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- assert(is_valid(tbe));
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:MERGED_GETS;
- out_msg.MergedRequestors := tbe.GetSRequestors;
- if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
- out_msg.Destination.add(in_msg.CurOwner);
- } else {
- out_msg.Destination.add(in_msg.Sender);
- }
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.InitialRequestTime := zero_time();
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- }
-
- action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
- assert(machineCount(MachineType:L1Cache) > 1);
- if (probe_filter_enabled || full_bit_dir_enabled) {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(cache_entry.Owner);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.DirectedProbe := true;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- } else {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
- out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- }
- }
-
- action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
- if (machineCount(MachineType:L1Cache) > 1) {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- peek(requestQueue_in, RequestMsg) {
- if (in_msg.Requestor != cache_entry.Owner) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- assert(is_valid(cache_entry));
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(cache_entry.Owner);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- out_msg.DirectedProbe := true;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- }
- } else {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
- out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- out_msg.InitialRequestTime := in_msg.InitialRequestTime;
- out_msg.ForwardRequestTime := curCycle();
- }
- }
- }
- }
- }
-
- action(f_forwardWriteFromDma, "fw", desc="Forward requests") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs > 0) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETX;
- //
- // Send to all L1 caches, since the requestor is the memory controller
- // itself
- //
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- }
- }
- }
- }
-
- action(f_forwardReadFromDma, "fr", desc="Forward requests") {
- assert(is_valid(tbe));
- if (tbe.NumPendingMsgs > 0) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
- out_msg.addr := address;
- out_msg.Type := CoherenceRequestType:GETS;
- //
- // Send to all L1 caches, since the requestor is the memory controller
- // itself
- //
- out_msg.Requestor := machineID;
- out_msg.Destination.broadcast(MachineType:L1Cache);
- out_msg.MessageSize := MessageSizeType:Broadcast_Control;
- }
- }
- }
- }
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue(clockEdge());
- }
-
- action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- peek(unblockNetwork_in, ResponseMsg) {
- APPEND_TRANSITION_COMMENT(in_msg.Sender);
- }
- unblockNetwork_in.dequeue(clockEdge());
- }
-
- action(k_wakeUpDependents, "k", desc="wake-up dependents") {
- wakeUpBuffers(address);
- }
-
- action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue(clockEdge());
- }
-
- action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
- triggerQueue_in.dequeue(clockEdge());
- }
-
- action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
-  action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait on the DMA request queue") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- stall_and_wait(dmaRequestQueue_in, address);
- }
-
- action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
- peek(memQueue_in, MemoryMsg) {
- assert(is_valid(tbe));
- if (tbe.CacheDirty == false) {
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
- }
-
- action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
- peek(responseToDir_in, ResponseMsg) {
- assert(is_valid(tbe));
- tbe.CacheDirty := true;
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
- assert(is_valid(tbe));
- assert(tbe.CacheDirty);
- }
-
-  action(ano_assertNotOwner, "ano", desc="Assert that the requestor is not the current owner") {
- if (probe_filter_enabled || full_bit_dir_enabled) {
- peek(requestQueue_in, RequestMsg) {
- assert(is_valid(cache_entry));
- assert(cache_entry.Owner != in_msg.Requestor);
- }
- }
- }
-
-  action(ans_assertNotSharer, "ans", desc="Assert that the requestor is not a current sharer") {
- if (full_bit_dir_enabled) {
- peek(requestQueue_in, RequestMsg) {
- assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
- }
- }
- }
-
- action(rs_removeSharer, "s", desc="remove current sharer") {
- if (full_bit_dir_enabled) {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
- cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
- }
- }
- }
-
- action(cs_clearSharers, "cs", desc="clear current sharers") {
- if (full_bit_dir_enabled) {
- peek(requestQueue_in, RequestMsg) {
- cache_entry.Sharers.clear();
- cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
- }
- }
- }
-
- action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
- peek(unblockNetwork_in, ResponseMsg) {
- queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
- in_msg.DataBlk);
- }
- }
-
- action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
- assert(is_valid(tbe));
- queueMemoryWritePartial(tbe.DmaRequestor, tbe.PhysicalAddress,
- to_memory_controller_latency, tbe.DmaDataBlk,
- tbe.Len);
- }
-
- action(ly_queueMemoryWriteFromTBE, "ly", desc="Write data to memory from TBE") {
- queueMemoryWrite(machineID, address, to_memory_controller_latency,
- tbe.DataBlk);
- }
-
- action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Dirty == false);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
- DPRINTF(RubySlicc, "%s\n", in_msg.DataBlk);
- }
- }
-
-  action(z_stallAndWaitRequest, "z", desc="Stall and wait on the request queue") {
- peek(requestQueue_in, RequestMsg) {
- APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- }
- stall_and_wait(requestQueue_in, address);
- }
-
- // TRANSITIONS
-
- // Transitions out of E state
- transition(E, GETX, NO_B_W) {
- pfa_probeFilterAllocate;
- v_allocateTBE;
- rx_recordExclusiveInTBE;
- saa_setAcksToAllIfPF;
- qf_queueMemoryFetchRequest;
- fn_forwardRequestIfNecessary;
- i_popIncomingRequestQueue;
- }
-
- transition(E, GETF, NO_F_W) {
- pfa_probeFilterAllocate;
- v_allocateTBE;
- rx_recordExclusiveInTBE;
- saa_setAcksToAllIfPF;
- qf_queueMemoryFetchRequest;
- fn_forwardRequestIfNecessary;
- i_popIncomingRequestQueue;
- }
-
- transition(E, GETS, NO_B_W) {
- pfa_probeFilterAllocate;
- v_allocateTBE;
- rx_recordExclusiveInTBE;
- saa_setAcksToAllIfPF;
- qf_queueMemoryFetchRequest;
- fn_forwardRequestIfNecessary;
- i_popIncomingRequestQueue;
- }
-
- transition(E, DMA_READ, NO_DR_B_W) {
- vd_allocateDmaRequestInTBE;
- qd_queueMemoryRequestFromDmaRead;
- spa_setPendingAcksToZeroIfPF;
- f_forwardReadFromDma;
- p_popDmaRequestQueue;
- }
-
- transition(E, DMA_WRITE, NO_DW_B_W) {
- vd_allocateDmaRequestInTBE;
- spa_setPendingAcksToZeroIfPF;
- sc_signalCompletionIfPF;
- f_forwardWriteFromDma;
- p_popDmaRequestQueue;
- }
-
- // Transitions out of O state
- transition(O, GETX, NO_B_W) {
- r_setMRU;
- v_allocateTBE;
- r_recordDataInTBE;
- sa_setAcksToOne;
- qf_queueMemoryFetchRequest;
- fb_forwardRequestBcast;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- transition(O, GETF, NO_F_W) {
- r_setMRU;
- v_allocateTBE;
- r_recordDataInTBE;
- sa_setAcksToOne;
- qf_queueMemoryFetchRequest;
- fb_forwardRequestBcast;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
-  // This transition is suboptimal: if a shared copy exists on-chip, that
-  // copy should provide the data rather than slow off-chip DRAM. The
-  // problem is that the current caches do not provide data in the S state.
- transition(O, GETS, O_B_W) {
- r_setMRU;
- v_allocateTBE;
- r_recordDataInTBE;
- saa_setAcksToAllIfPF;
- qf_queueMemoryFetchRequest;
- fn_forwardRequestIfNecessary;
- i_popIncomingRequestQueue;
- }
-
- transition(O, DMA_READ, O_DR_B_W) {
- vd_allocateDmaRequestInTBE;
- spa_setPendingAcksToZeroIfPF;
- qd_queueMemoryRequestFromDmaRead;
- f_forwardReadFromDma;
- p_popDmaRequestQueue;
- }
-
- transition(O, Pf_Replacement, O_R) {
- v_allocateTBE;
- pa_setPendingMsgsToAll;
- ia_invalidateAllRequest;
- pfd_probeFilterDeallocate;
- }
-
- transition(S, Pf_Replacement, S_R) {
- v_allocateTBE;
- pa_setPendingMsgsToAll;
- ia_invalidateAllRequest;
- pfd_probeFilterDeallocate;
- }
-
- transition(NO, Pf_Replacement, NO_R) {
- v_allocateTBE;
- po_setPendingMsgsToOne;
- io_invalidateOwnerRequest;
- pfd_probeFilterDeallocate;
- }
-
- transition(NX, Pf_Replacement, NO_R) {
- v_allocateTBE;
- pa_setPendingMsgsToAll;
- ia_invalidateAllRequest;
- pfd_probeFilterDeallocate;
- }
-
- transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
- vd_allocateDmaRequestInTBE;
- f_forwardWriteFromDma;
- p_popDmaRequestQueue;
- }
-
-  // Transitions out of NX state
- transition(NX, GETX, NO_B) {
- r_setMRU;
- fb_forwardRequestBcast;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- transition(NX, GETF, NO_F) {
- r_setMRU;
- fb_forwardRequestBcast;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- // Transitions out of NO state
- transition(NO, GETX, NO_B) {
- r_setMRU;
- ano_assertNotOwner;
- fc_forwardRequestConditionalOwner;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- transition(NO, GETF, NO_F) {
- r_setMRU;
- //ano_assertNotOwner;
-    nofc_forwardRequestConditionalOwner; // forward the request if the requestor is not the owner
-    cs_clearSharers;
-    oc_sendBlockAck; // send a block ack if the requestor is the owner
- i_popIncomingRequestQueue;
- }
-
- transition(S, GETX, NO_B) {
- r_setMRU;
- fb_forwardRequestBcast;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- transition(S, GETF, NO_F) {
- r_setMRU;
- fb_forwardRequestBcast;
- cs_clearSharers;
- i_popIncomingRequestQueue;
- }
-
- transition(S, GETS, NO_B) {
- r_setMRU;
- ano_assertNotOwner;
- fb_forwardRequestBcast;
- i_popIncomingRequestQueue;
- }
-
- transition(NO, GETS, NO_B) {
- r_setMRU;
- ano_assertNotOwner;
- ans_assertNotSharer;
- fc_forwardRequestConditionalOwner;
- i_popIncomingRequestQueue;
- }
-
- transition(NX, GETS, NO_B) {
- r_setMRU;
- ano_assertNotOwner;
- fc_forwardRequestConditionalOwner;
- i_popIncomingRequestQueue;
- }
-
- transition({NO, NX, S}, PUT, WB) {
- //
- // note that the PUT requestor may not be the current owner if an invalidate
- // raced with PUT
- //
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
- vd_allocateDmaRequestInTBE;
- f_forwardReadFromDma;
- p_popDmaRequestQueue;
- }
-
- // Nack PUT requests when races cause us to believe we own the data
- transition({O, E}, PUT) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- // Blocked transient states
- transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
- NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
- NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
- {GETS, GETX, GETF, PUT, Pf_Replacement}) {
- z_stallAndWaitRequest;
- }
-
-  transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}) {
- z_stallAndWaitRequest;
- }
-
- transition(NO_B, {GETX, GETF}, NO_B_X) {
- z_stallAndWaitRequest;
- }
-
- transition(NO_B, {PUT, Pf_Replacement}) {
- z_stallAndWaitRequest;
- }
-
- transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
- z_stallAndWaitRequest;
- }
-
- transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
- NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
- NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
- {DMA_READ, DMA_WRITE}) {
- zd_stallAndWaitDMARequest;
- }
-
- // merge GETS into one response
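-  // While blocked, later GETS requestors are recorded in the TBE; once
-  // the owner unblocks, a single MERGED_GETS probe serves all of them.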
- transition(NO_B, GETS, NO_B_S) {
- v_allocateTBE;
- rs_recordGetSRequestor;
- i_popIncomingRequestQueue;
- }
-
- transition(NO_B_S, GETS) {
- rs_recordGetSRequestor;
- i_popIncomingRequestQueue;
- }
-
- // unblock responses
- transition({NO_B, NO_B_X}, UnblockS, NX) {
- us_updateSharerIfFBD;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition({NO_B, NO_B_X}, UnblockM, NO) {
- uo_updateOwnerIfPf;
- us_updateSharerIfFBD;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_B_S, UnblockS, NO_B_S_W) {
- us_updateSharerIfFBD;
- fr_forwardMergeReadRequestsToOwner;
- sp_setPendingMsgsToMergedSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_B_S, UnblockM, NO_B_S_W) {
- uo_updateOwnerIfPf;
- fr_forwardMergeReadRequestsToOwner;
- sp_setPendingMsgsToMergedSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_B_S_W, UnblockS) {
- us_updateSharerIfFBD;
- mu_decrementNumberOfUnblocks;
- os_checkForMergedGetSCompletion;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_B_S_W, All_Unblocks, NX) {
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(O_B, UnblockS, O) {
- us_updateSharerIfFBD;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition(O_B, UnblockM, NO) {
- us_updateSharerIfFBD;
- uo_updateOwnerIfPf;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_B_W, Memory_Data, NO_B) {
- d_sendData;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(NO_F_W, Memory_Data, NO_F) {
- d_sendData;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
- r_recordMemoryData;
- o_checkForCompletion;
- l_popMemQueue;
- }
-
- transition(O_DR_B_W, Memory_Data, O_DR_B) {
- r_recordMemoryData;
- dr_sendDmaData;
- o_checkForCompletion;
- l_popMemQueue;
- }
-
- transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition({O_R, S_R, NO_R}, Ack) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(S_R, Data) {
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(NO_R, {Data, Exclusive_Data}) {
- r_recordCacheData;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition({O_R, S_R}, All_acks_and_data_no_sharers, E) {
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_R, All_acks_and_data_no_sharers, WB_E_W) {
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition({NO_DR_B_W, O_DR_B_W}, Ack) {
- m_decrementNumberOfMessages;
- n_popResponseQueue;
- }
-
- transition(NO_DR_B_W, Shared_Ack) {
- m_decrementNumberOfMessages;
- r_setSharerBit;
- n_popResponseQueue;
- }
-
- transition(O_DR_B, Shared_Ack) {
- m_decrementNumberOfMessages;
- r_setSharerBit;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(O_DR_B_W, Shared_Ack) {
- m_decrementNumberOfMessages;
- r_setSharerBit;
- n_popResponseQueue;
- }
-
- transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
- m_decrementNumberOfMessages;
- r_setSharerBit;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(NO_DR_B_W, Shared_Data) {
- r_recordCacheData;
- m_decrementNumberOfMessages;
- so_setOwnerBit;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
- r_recordCacheData;
- m_decrementNumberOfMessages;
- so_setOwnerBit;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(NO_DR_B_W, {Exclusive_Data, Data}) {
- r_recordCacheData;
- m_decrementNumberOfMessages;
- n_popResponseQueue;
- }
-
- transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
- r_recordCacheData;
- m_decrementNumberOfMessages;
- o_checkForCompletion;
- n_popResponseQueue;
- }
-
- transition(NO_DR_B, All_acks_and_owner_data, WB_O_W) {
- //
- // Note that the DMA consistency model allows us to send the DMA device
- // a response as soon as we receive valid data and prior to receiving
- // all acks. However, to simplify the protocol we wait for all acks.
- //
- dt_sendDmaDataFromTbe;
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_DR_B, All_acks_and_shared_data, S) {
- //
- // Note that the DMA consistency model allows us to send the DMA device
- // a response as soon as we receive valid data and prior to receiving
- // all acks. However, to simplify the protocol we wait for all acks.
- //
- dt_sendDmaDataFromTbe;
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_DR_B_D, All_acks_and_owner_data, WB_O_W) {
- //
- // Note that the DMA consistency model allows us to send the DMA device
- // a response as soon as we receive valid data and prior to receiving
- // all acks. However, to simplify the protocol we wait for all acks.
- //
- dt_sendDmaDataFromTbe;
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_DR_B_D, All_acks_and_shared_data, S) {
- //
- // Note that the DMA consistency model allows us to send the DMA device
- // a response as soon as we receive valid data and prior to receiving
- // all acks. However, to simplify the protocol we wait for all acks.
- //
- dt_sendDmaDataFromTbe;
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(O_DR_B, All_acks_and_owner_data, WB_O_W) {
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(O_DR_B, All_acks_and_data_no_sharers, WB_E_W) {
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- pfd_probeFilterDeallocate;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_DR_B, All_acks_and_data_no_sharers, WB_E_W) {
- //
- // Note that the DMA consistency model allows us to send the DMA device
- // a response as soon as we receive valid data and prior to receiving
- // all acks. However, to simplify the protocol we wait for all acks.
- //
- dt_sendDmaDataFromTbe;
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- ppfd_possibleProbeFilterDeallocate;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_DR_B_D, All_acks_and_data_no_sharers, WB_E_W) {
- a_assertCacheData;
- //
- // Note that the DMA consistency model allows us to send the DMA device
- // a response as soon as we receive valid data and prior to receiving
- // all acks. However, to simplify the protocol we wait for all acks.
- //
- dt_sendDmaDataFromTbe;
- ly_queueMemoryWriteFromTBE;
- w_deallocateTBE;
- ppfd_possibleProbeFilterDeallocate;
- k_wakeUpDependents;
- g_popTriggerQueue;
- }
-
- transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
- ld_queueMemoryDmaWrite;
- g_popTriggerQueue;
- }
-
- transition(NO_DW_W, Memory_Ack, E) {
- da_sendDmaAck;
- w_deallocateTBE;
- ppfd_possibleProbeFilterDeallocate;
- k_wakeUpDependents;
- l_popMemQueue;
- }
-
- transition(O_B_W, Memory_Data, O_B) {
- d_sendData;
- w_deallocateTBE;
- l_popMemQueue;
- }
-
- transition(NO_B_W, UnblockM, NO_W) {
- uo_updateOwnerIfPf;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_B_W, UnblockS, NO_W) {
- us_updateSharerIfFBD;
- j_popIncomingUnblockQueue;
- }
-
- transition(O_B_W, UnblockS, O_W) {
- us_updateSharerIfFBD;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_W, Memory_Data, NO) {
- w_deallocateTBE;
- k_wakeUpDependents;
- l_popMemQueue;
- }
-
- transition(O_W, Memory_Data, O) {
- w_deallocateTBE;
- k_wakeUpDependents;
- l_popMemQueue;
- }
-
-  // WB State Transitions
- transition(WB, Writeback_Dirty, WB_O_W) {
- rs_removeSharer;
- l_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
- rs_removeSharer;
- l_queueMemoryWBRequest;
- pfd_probeFilterDeallocate;
- j_popIncomingUnblockQueue;
- }
-
- transition(WB_E_W, Memory_Ack, E) {
- k_wakeUpDependents;
- l_popMemQueue;
- }
-
- transition(WB_O_W, Memory_Ack, O) {
- k_wakeUpDependents;
- l_popMemQueue;
- }
-
- transition(WB, Writeback_Clean, O) {
- ll_checkIncomingWriteback;
- rs_removeSharer;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition(WB, Writeback_Exclusive_Clean, E) {
- ll_checkIncomingWriteback;
- rs_removeSharer;
- pfd_probeFilterDeallocate;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition(WB, Unblock, NX) {
- auno_assertUnblockerNotOwner;
- k_wakeUpDependents;
- j_popIncomingUnblockQueue;
- }
-
- transition(NO_F, PUTF, WB) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
-  // Possible race between GETF and UnblockM -- unclear whether this is still needed.
- transition(NO_F, UnblockM) {
- us_updateSharerIfFBD;
- uo_updateOwnerIfPf;
- j_popIncomingUnblockQueue;
- }
-}
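
For readers unfamiliar with SLICC, each `transition(state, event, next_state) { actions... }` block above compiles into a (state, event) dispatch: look up the pair, execute the listed actions in order, then install the next state. A minimal C++ sketch of that shape, with one directory transition and its action names stubbed out as prints (illustrative only, not the code gem5 actually generates):

    #include <cstdio>

    enum class State { WB, O };
    enum class Event { Writeback_Clean };

    // Dispatch one (state, event) pair: run the actions, return the next state.
    State doTransition(State s, Event e)
    {
        if (s == State::WB && e == Event::Writeback_Clean) {
            std::puts("ll_checkIncomingWriteback");  // action stubs
            std::puts("rs_removeSharer");
            std::puts("k_wakeUpDependents");
            std::puts("j_popIncomingUnblockQueue");
            return State::O;                         // next state
        }
        return s;  // an unhandled (state, event) pair is a protocol error in gem5
    }

    int main() { doTransition(State::WB, Event::Writeback_Clean); }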
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-machine(MachineType:DMA, "DMA Controller")
- : DMASequencer * dma_sequencer;
- Cycles request_latency := 6;
-
- MessageBuffer * responseFromDir, network="From", virtual_network="1",
- vnet_type="response";
- MessageBuffer * requestToDir, network="To", virtual_network="0",
- vnet_type="request";
- MessageBuffer * mandatoryQueue;
-{
- state_declaration(State, desc="DMA states", default="DMA_State_READY") {
- READY, AccessPermission:Invalid, desc="Ready to accept a new request";
-    BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a read request";
-    BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a write request";
- }
-
- enumeration(Event, desc="DMA events") {
- ReadRequest, desc="A new read request";
- WriteRequest, desc="A new write request";
- Data, desc="Data from a DMA memory read";
- Ack, desc="DMA write to memory completed";
- }
-
- structure(TBE, desc="...") {
- State TBEState, desc="Transient state";
- DataBlock DataBlk, desc="Data";
- }
-
- structure(TBETable, external = "yes") {
- TBE lookup(Addr);
- void allocate(Addr);
- void deallocate(Addr);
- bool isPresent(Addr);
- }
-
- void set_tbe(TBE b);
- void unset_tbe();
- void wakeUpAllBuffers();
-
- TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
-
- Tick clockEdge();
- MachineID mapAddressToMachine(Addr addr, MachineType mtype);
-
- State getState(TBE tbe, Addr addr) {
- if (is_valid(tbe)) {
- return tbe.TBEState;
- } else {
- return State:READY;
- }
- }
-
- void setState(TBE tbe, Addr addr, State state) {
- if (is_valid(tbe)) {
- tbe.TBEState := state;
- }
- }
-
- AccessPermission getAccessPermission(Addr addr) {
- return AccessPermission:NotPresent;
- }
-
- void setAccessPermission(Addr addr, State state) {
- }
-
- void functionalRead(Addr addr, Packet *pkt) {
- error("DMA does not support functional read.");
- }
-
- int functionalWrite(Addr addr, Packet *pkt) {
- error("DMA does not support functional write.");
- }
-
- out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
-
- in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady(clockEdge())) {
- peek(dmaRequestQueue_in, SequencerMsg) {
- if (in_msg.Type == SequencerRequestType:LD ) {
- trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == SequencerRequestType:ST) {
- trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid request type");
- }
- }
- }
- }
-
- in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady(clockEdge())) {
- peek( dmaResponseQueue_in, DMAResponseMsg) {
- if (in_msg.Type == DMAResponseType:ACK) {
- trigger(Event:Ack, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else if (in_msg.Type == DMAResponseType:DATA) {
- trigger(Event:Data, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
- } else {
- error("Invalid response type");
- }
- }
- }
- }
-
- action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(requestToDir_out, DMARequestMsg, request_latency) {
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.LineAddress := in_msg.LineAddress;
- out_msg.Type := DMARequestType:READ;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, SequencerMsg) {
- enqueue(requestToDir_out, DMARequestMsg, request_latency) {
- out_msg.PhysicalAddress := in_msg.PhysicalAddress;
- out_msg.LineAddress := in_msg.LineAddress;
- out_msg.Type := DMARequestType:WRITE;
- out_msg.Requestor := machineID;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Len := in_msg.Len;
- out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
- dma_sequencer.ackCallback(address);
- }
-
- action(d_dataCallback, "d", desc="Write data to dma sequencer") {
- dma_sequencer.dataCallback(tbe.DataBlk, address);
- }
-
- action(t_updateTBEData, "t", desc="Update TBE Data") {
- assert(is_valid(tbe));
- peek( dmaResponseQueue_in, DMAResponseMsg) {
- tbe.DataBlk := in_msg.DataBlk;
- }
- }
-
- action(v_allocateTBE, "v", desc="Allocate TBE entry") {
- TBEs.allocate(address);
- set_tbe(TBEs[address]);
- }
-
- action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
- TBEs.deallocate(address);
- unset_tbe();
- }
-
- action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue(clockEdge());
- }
-
-  action(p_popResponseQueue, "\p", desc="Pop response queue") {
- dmaResponseQueue_in.dequeue(clockEdge());
- }
-
- action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
- stall_and_wait(dmaRequestQueue_in, address);
- }
-
- action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
- wakeUpAllBuffers();
- }
-
- transition(READY, ReadRequest, BUSY_RD) {
- v_allocateTBE;
- s_sendReadRequest;
- p_popRequestQueue;
- }
-
- transition(READY, WriteRequest, BUSY_WR) {
- v_allocateTBE;
- s_sendWriteRequest;
- p_popRequestQueue;
- }
-
- transition(BUSY_RD, Data, READY) {
- t_updateTBEData;
- d_dataCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition(BUSY_WR, Ack, READY) {
- a_ackCallback;
- w_deallocateTBE;
- p_popResponseQueue;
- wkad_wakeUpAllDependents;
- }
-
- transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
- zz_stallAndWaitRequestQueue;
- }
-
-}
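
The deleted DMA controller above is a three-state machine: READY accepts a new request and moves to BUSY_RD or BUSY_WR, the matching Data or Ack response returns it to READY, and requests that alias a busy line are stalled and replayed on wake-up. A toy C++ model of that control flow (a deliberate simplification, not gem5 code: the real controller tracks one TBE per line address, this collapses everything to a single transaction):

    enum class DmaState { READY, BUSY_RD, BUSY_WR };

    struct DmaController {
        DmaState state = DmaState::READY;

        // READY + ReadRequest -> BUSY_RD; a busy controller stalls the request
        // (the SLICC version uses stall_and_wait and wakeUpAllBuffers).
        bool issueRead()
        {
            if (state != DmaState::READY) return false;
            state = DmaState::BUSY_RD;   // v_allocateTBE; s_sendReadRequest
            return true;
        }

        bool issueWrite()
        {
            if (state != DmaState::READY) return false;
            state = DmaState::BUSY_WR;   // v_allocateTBE; s_sendWriteRequest
            return true;
        }

        void onData() { state = DmaState::READY; }  // d_dataCallback; w_deallocateTBE
        void onAck()  { state = DmaState::READY; }  // a_ackCallback; w_deallocateTBE
    };

    int main()
    {
        DmaController dma;
        dma.issueRead();   // READY -> BUSY_RD
        dma.issueWrite();  // stalled: controller is busy
        dma.onData();      // BUSY_RD -> READY
    }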
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * AMD's contributions to the MOESI hammer protocol do not constitute an
- * endorsement of its similarity to any AMD products.
- */
-
-// CoherenceRequestType
-enumeration(CoherenceRequestType, desc="...") {
- GETX, desc="Get eXclusive";
- GETS, desc="Get Shared";
-  MERGED_GETS,    desc="Merged Get Shared requests";
- PUT, desc="Put Ownership";
- WB_ACK, desc="Writeback ack";
- WB_NACK, desc="Writeback neg. ack";
- PUTF, desc="PUT on a Flush";
- GETF, desc="Issue exclusive for Flushing";
- BLOCK_ACK, desc="Dir Block ack";
- INV, desc="Invalidate";
-}
-
-// CoherenceResponseType
-enumeration(CoherenceResponseType, desc="...") {
- ACK, desc="ACKnowledgment, responder does not have a copy";
- ACK_SHARED, desc="ACKnowledgment, responder has a shared copy";
- DATA, desc="Data, responder does not have a copy";
- DATA_SHARED, desc="Data, responder has a shared copy";
- DATA_EXCLUSIVE, desc="Data, responder was exclusive, gave us a copy, and they went to invalid";
- WB_CLEAN, desc="Clean writeback";
- WB_DIRTY, desc="Dirty writeback";
- WB_EXCLUSIVE_CLEAN, desc="Clean writeback of exclusive data";
- WB_EXCLUSIVE_DIRTY, desc="Dirty writeback of exclusive data";
- UNBLOCK, desc="Unblock for writeback";
- UNBLOCKS, desc="Unblock now in S";
- UNBLOCKM, desc="Unblock now in M/O/E";
- NULL, desc="Null value";
-}
-
-// TriggerType
-enumeration(TriggerType, desc="...") {
- L2_to_L1, desc="L2 to L1 transfer";
- ALL_ACKS, desc="See corresponding event";
- ALL_ACKS_OWNER_EXISTS,desc="See corresponding event";
- ALL_ACKS_NO_SHARERS, desc="See corresponding event";
-  ALL_UNBLOCKS,         desc="all UnblockS responses received";
-}
-
-// TriggerMsg
-structure(TriggerMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- TriggerType Type, desc="Type of trigger";
-
- bool functionalRead(Packet *pkt) {
- // Trigger messages do not hold any data!
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Trigger messages do not hold any data!
- return false;
- }
-}
-
-// RequestMsg (and also forwarded requests)
-structure(RequestMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest MergedRequestors, desc="Merge set of read requestors";
- NetDest Destination, desc="Multicast destination mask";
- MessageSizeType MessageSize, desc="size category of the message";
- bool DirectedProbe, default="false", desc="probe filter directed probe";
-
- Cycles InitialRequestTime, default="Cycles(0)",
-         desc="time the initial request was sent from the L1Cache";
- Cycles ForwardRequestTime, default="Cycles(0)",
- desc="time the dir forwarded the request";
- int SilentAcks, default="0", desc="silent acks from the full-bit directory";
-
- bool functionalRead(Packet *pkt) {
- // Request messages do not hold any data
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Request messages do not hold any data
- return false;
- }
-}
-
-// ResponseMsg (and also unblock requests)
-structure(ResponseMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
- MachineID Sender, desc="Node who sent the data";
- MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
- NetDest Destination, desc="Node to whom the data is sent";
- DataBlock DataBlk, desc="data for the cache line";
- bool Dirty, desc="Is the data dirty (different than memory)?";
- int Acks, default="0", desc="How many messages this counts as";
- MessageSizeType MessageSize, desc="size category of the message";
-
- Cycles InitialRequestTime, default="Cycles(0)",
-         desc="time the initial request was sent from the L1Cache";
- Cycles ForwardRequestTime, default="Cycles(0)",
- desc="time the dir forwarded the request";
- int SilentAcks, default="0", desc="silent acks from the full-bit directory";
-
- bool functionalRead(Packet *pkt) {
- // The check below ensures that data is read only from messages that
- // actually hold data.
- if (Type == CoherenceResponseType:DATA ||
- Type == CoherenceResponseType:DATA_SHARED ||
- Type == CoherenceResponseType:DATA_EXCLUSIVE ||
- Type == CoherenceResponseType:WB_DIRTY ||
- Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- return false;
- }
-
- bool functionalWrite(Packet *pkt) {
- // Message type does not matter since all messages are written.
- // If a protocol reads data from a packet that is not supposed
- // to hold the data, then the fault lies with the protocol.
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
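
The `functionalRead` above only consults `DataBlk` for message types that actually carry data; acks, unblocks, and clean writebacks must report false. The same gating, extracted into a standalone C++ sketch:

    enum class RespType {
        ACK, ACK_SHARED, DATA, DATA_SHARED, DATA_EXCLUSIVE,
        WB_CLEAN, WB_DIRTY, WB_EXCLUSIVE_CLEAN, WB_EXCLUSIVE_DIRTY,
        UNBLOCK, UNBLOCKS, UNBLOCKM
    };

    // Mirrors the functionalRead check: true only for data-bearing responses,
    // where a testAndRead(addr, DataBlk, pkt) call would be legal.
    bool holdsData(RespType t)
    {
        switch (t) {
          case RespType::DATA:
          case RespType::DATA_SHARED:
          case RespType::DATA_EXCLUSIVE:
          case RespType::WB_DIRTY:
          case RespType::WB_EXCLUSIVE_DIRTY:
            return true;
          default:
            return false;  // no payload a functional read may trust
        }
    }

    int main() { return holdsData(RespType::DATA) ? 0 : 1; }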
-
-enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
- READ, desc="Memory Read";
- WRITE, desc="Memory Write";
- NULL, desc="Invalid";
-}
-
-enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
- DATA, desc="DATA read";
- ACK, desc="ACK write";
- NULL, desc="Invalid";
-}
-
-structure(DMARequestMsg, desc="...", interface="Message") {
- DMARequestType Type, desc="Request type (read/write)";
- Addr PhysicalAddress, desc="Physical address for this request";
- Addr LineAddress, desc="Line address for this request";
- MachineID Requestor, desc="Node who initiated the request";
- NetDest Destination, desc="Destination";
- DataBlock DataBlk, desc="DataBlk attached to this request";
- int Len, desc="The length of the request";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- return testAndRead(LineAddress, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(LineAddress, DataBlk, pkt);
- }
-}
-
-structure(DMAResponseMsg, desc="...", interface="Message") {
- DMAResponseType Type, desc="Response type (DATA/ACK)";
- Addr PhysicalAddress, desc="Physical address for this request";
- Addr LineAddress, desc="Line address for this request";
- NetDest Destination, desc="Destination";
- DataBlock DataBlk, desc="DataBlk attached to this request";
- MessageSizeType MessageSize, desc="size category of the message";
-
- bool functionalRead(Packet *pkt) {
- return testAndRead(LineAddress, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(LineAddress, DataBlk, pkt);
- }
-}
+++ /dev/null
-protocol "MOESI_hammer";
-include "RubySlicc_interfaces.slicc";
-include "MOESI_hammer-msg.sm";
-include "MOESI_hammer-cache.sm";
-include "MOESI_hammer-dir.sm";
-include "MOESI_hammer-dma.sm";
+++ /dev/null
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// Mapping functions
-
-int machineCount(MachineType machType);
-MachineID mapAddressToRange(Addr addr, MachineType type,
- int low, int high);
-MachineID mapAddressToRange(Addr addr, MachineType type,
- int low, int high, NodeID n);
-NetDest broadcast(MachineType type);
-NodeID machineIDToNodeID(MachineID machID);
-NodeID machineIDToVersion(MachineID machID);
-MachineType machineIDToMachineType(MachineID machID);
-MachineID createMachineID(MachineType t, NodeID i);
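
The range-mapping helpers declared above pick one machine out of [low, high] based on the address. A plausible C++ sketch of that computation (an assumption inferred from the declaration, not the gem5 implementation; 64-byte blocks assumed):

    #include <cstdint>

    using Addr = std::uint64_t;

    // Select a machine in [low, high] from the line-address bits above the
    // block offset, interleaving consecutive lines across the range.
    int mapAddressToRange(Addr addr, int low, int high, int blockBits = 6)
    {
        int count = high - low + 1;
        return low + static_cast<int>((addr >> blockBits) % count);
    }

    int main() { return mapAddressToRange(0x1040, 0, 3); }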
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// Hack: no node object declarations needed, since the base class has them
-NodeID version;
-MachineID machineID;
-NodeID clusterID;
-Cycles recycle_latency;
-
-// Functions implemented in the AbstractController class for
-// making timing access to the memory maintained by the
-// memory controllers.
-void queueMemoryRead(MachineID id, Addr addr, Cycles latency);
-void queueMemoryWrite(MachineID id, Addr addr, Cycles latency,
- DataBlock block);
-void queueMemoryWritePartial(MachineID id, Addr addr, Cycles latency,
- DataBlock block, int size);
-
-// Functions implemented in the AbstractController class for
-// making functional access to the memory maintained by the
-// memory controllers.
-void functionalMemoryRead(Packet *pkt);
-bool functionalMemoryWrite(Packet *pkt);
+++ /dev/null
-/*
- * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
- * Copyright (c) 2011 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// Declarations of external types that are common to all protocols
-external_type(int, primitive="yes", default="0");
-external_type(bool, primitive="yes", default="false");
-external_type(std::string, primitive="yes");
-external_type(uint32_t, primitive="yes");
-external_type(uint64_t, primitive="yes");
-external_type(PacketPtr, primitive="yes");
-external_type(Packet, primitive="yes");
-external_type(Addr, primitive="yes");
-external_type(Cycles, primitive="yes", default="Cycles(0)");
-external_type(Tick, primitive="yes", default="0");
-
-structure(WriteMask, external="yes", desc="...") {
- void clear();
- bool cmpMask(WriteMask);
- bool isEmpty();
- bool isFull();
- bool isOverlap(WriteMask);
- void orMask(WriteMask);
- void fillMask();
-}
-
-structure(DataBlock, external = "yes", desc="..."){
- void clear();
- void copyPartial(DataBlock, int, int);
- void copyPartial(DataBlock, WriteMask);
- void atomicPartial(DataBlock, WriteMask);
-}
-
-bool testAndRead(Addr addr, DataBlock datablk, Packet *pkt);
-bool testAndReadMask(Addr addr, DataBlock datablk, WriteMask mask, Packet *pkt);
-bool testAndWrite(Addr addr, DataBlock datablk, Packet *pkt);
-
-// AccessPermission
-// The following five states define the access permission of all memory blocks.
-// These permissions have multiple uses. They coordinate locking and
-// synchronization primitives, as well as enable functional accesses.
-// One should not need to add any additional permission values and it is very
-// risky to do so.
-enumeration(AccessPermission, desc="...", default="AccessPermission_NotPresent") {
- // Valid data
- Read_Only, desc="block is Read Only (modulo functional writes)";
- Read_Write, desc="block is Read/Write";
-
- // Possibly Invalid data
-  // The maybe stale permission indicates that, according to the protocol,
- // there is no guarantee the block contains valid data. However, functional
- // writes should update the block because a dataless PUT request may
- // revalidate the block's data.
- Maybe_Stale, desc="block can be stale or revalidated by a dataless PUT";
-  // In Broadcast/Snoop protocols, memory has no idea whether it is the
-  // exclusive owner of a block, which makes it impossible to enforce the
-  // invariant of a single read_write block in the system. This permission
-  // allows the memory to say, "I have the block" and lets the RubyPort logic
-  // know that this is a last-resort copy if there are no writable copies in
-  // the caching hierarchy.
- // This is not supposed to be used in directory or token protocols where
- // memory/NB has an idea of what is going on in the whole system.
- Backing_Store, desc="for memory in Broadcast/Snoop protocols";
-
- // Invalid data
- Invalid, desc="block is in an Invalid base state";
- NotPresent, desc="block is NotPresent";
- Busy, desc="block is in a transient state, currently invalid";
-}
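
Tying the permission comments above together: a small C++ sketch of how functional accesses might be gated by AccessPermission, following the documented rules (only known-valid copies satisfy reads; Maybe_Stale and Backing_Store copies must still accept writes). This illustrates the stated semantics and is not the RubyPort implementation:

    enum class AccessPermission {
        Read_Only, Read_Write, Maybe_Stale, Backing_Store,
        Invalid, NotPresent, Busy
    };

    // Only known-valid copies may satisfy a functional read.
    bool functionallyReadable(AccessPermission p)
    {
        return p == AccessPermission::Read_Only ||
               p == AccessPermission::Read_Write;
    }

    // Per the comments above: Maybe_Stale must be written because a dataless
    // PUT may revalidate it, and Backing_Store is the last-resort copy.
    bool functionallyWritable(AccessPermission p)
    {
        return p == AccessPermission::Read_Only ||
               p == AccessPermission::Read_Write ||
               p == AccessPermission::Maybe_Stale ||
               p == AccessPermission::Backing_Store;
    }

    int main() { return functionallyWritable(AccessPermission::Maybe_Stale) ? 0 : 1; }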
-// HSA scopes
-enumeration(HSAScope, desc="...", default="HSAScope_UNSPECIFIED") {
- UNSPECIFIED, desc="Unspecified scope";
-  NOSCOPE,     desc="Explicitly unscoped";
- WAVEFRONT, desc="Wavefront scope";
- WORKGROUP, desc="Workgroup scope";
- DEVICE, desc="Device scope";
- SYSTEM, desc="System scope";
-}
-
-// HSA segment types
-enumeration(HSASegment, desc="...", default="HSASegment_GLOBAL") {
- GLOBAL, desc="Global segment";
- GROUP, desc="Group segment";
- PRIVATE, desc="Private segment";
- KERNARG, desc="Kernarg segment";
- READONLY, desc="Readonly segment";
- SPILL, desc="Spill segment";
- ARG, desc="Arg segment";
-}
-
-// TesterStatus
-enumeration(TesterStatus, desc="...") {
- Idle, desc="Idle";
- Action_Pending, desc="Action Pending";
- Ready, desc="Ready";
- Check_Pending, desc="Check Pending";
-}
-
-// InvalidateGeneratorStatus
-enumeration(InvalidateGeneratorStatus, desc="...") {
- Load_Waiting, desc="Load waiting to be issued";
- Load_Pending, desc="Load issued";
- Inv_Waiting, desc="Store (invalidate) waiting to be issued";
- Inv_Pending, desc="Store (invalidate) issued";
-}
-
-// SeriesRequestGeneratorStatus
-enumeration(SeriesRequestGeneratorStatus, desc="...") {
- Thinking, desc="Doing work before next action";
- Request_Pending, desc="Request pending";
-}
-
-// LockStatus
-enumeration(LockStatus, desc="...") {
- Unlocked, desc="Lock is not held";
- Locked, desc="Lock is held";
-}
-
-// SequencerStatus
-enumeration(SequencerStatus, desc="...") {
- Idle, desc="Idle";
- Pending, desc="Pending";
-}
-
-enumeration(TransitionResult, desc="...") {
- Valid, desc="Valid transition";
- ResourceStall, desc="Stalled due to insufficient resources";
- ProtocolStall, desc="Protocol specified stall";
- Reject, desc="Rejected because of a type mismatch";
-}
-
-// RubyRequestType
-enumeration(RubyRequestType, desc="...", default="RubyRequestType_NULL") {
- LD, desc="Load";
- ST, desc="Store";
-  ATOMIC,            desc="Atomic Load/Store -- deprecated; use ATOMIC_RETURN or ATOMIC_NO_RETURN";
- ATOMIC_RETURN, desc="Atomic Load/Store, return data";
- ATOMIC_NO_RETURN, desc="Atomic Load/Store, do not return data";
- IFETCH, desc="Instruction fetch";
- IO, desc="I/O";
- REPLACEMENT, desc="Replacement";
-  Load_Linked,       desc="Load linked";
-  Store_Conditional, desc="Store conditional";
-  RMW_Read,          desc="Read-modify-write, read phase";
-  RMW_Write,         desc="Read-modify-write, write phase";
-  Locked_RMW_Read,   desc="Locked read-modify-write, read phase";
-  Locked_RMW_Write,  desc="Locked read-modify-write, write phase";
- COMMIT, desc="Commit version";
- NULL, desc="Invalid request type";
- FLUSH, desc="Flush request type";
- Release, desc="Release operation";
-  Acquire,           desc="Acquire operation";
-  AcquireRelease,    desc="Acquire and Release operation";
-}
-
-enumeration(SequencerRequestType, desc="...", default="SequencerRequestType_NULL") {
-  Default,    desc="Replace this with access_types passed to the Sequencer Ruby object";
- LD, desc="Load";
- ST, desc="Store";
- ATOMIC, desc="Atomic Load/Store";
- REPLACEMENT, desc="Replacement";
- FLUSH, desc="Flush request type";
- NULL, desc="Invalid request type";
-}
-
-enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
- DataArrayRead, desc="Read access to the cache's data array";
- DataArrayWrite, desc="Write access to the cache's data array";
- TagArrayRead, desc="Read access to the cache's tag array";
- TagArrayWrite, desc="Write access to the cache's tag array";
-}
-
-enumeration(CacheResourceType, desc="...", default="CacheResourceType_NULL") {
- DataArray, desc="Access to the cache's data array";
- TagArray, desc="Access to the cache's tag array";
-}
-
-enumeration(DirectoryRequestType, desc="...", default="DirectoryRequestType_NULL") {
- Default, desc="Replace this with access_types passed to the Directory Ruby object";
-}
-
-enumeration(DMASequencerRequestType, desc="...", default="DMASequencerRequestType_NULL") {
- Default, desc="Replace this with access_types passed to the DMA Ruby object";
-}
-
-enumeration(MemoryControlRequestType, desc="...", default="MemoryControlRequestType_NULL") {
-  Default,    desc="Replace this with access_types passed to the Memory Control Ruby object";
-}
-
-
-// These are the statically defined types of state machines that we can have.
-// If you want to add a new machine type, edit this enum. It is not necessary
-// for a protocol to define state machines for all the types here, but you
-// cannot use anything other than the ones defined here. Also, a protocol
-// can have only one state machine for a given type.
-enumeration(MachineType, desc="...", default="MachineType_NULL") {
- L0Cache, desc="L0 Cache Mach";
- L1Cache, desc="L1 Cache Mach";
- L2Cache, desc="L2 Cache Mach";
- L3Cache, desc="L3 Cache Mach";
- Directory, desc="Directory Mach";
- DMA, desc="DMA Mach";
- Collector, desc="Collector Mach";
- L1Cache_wCC, desc="L1 Cache Mach to track cache-to-cache transfer (used for miss latency profile)";
- L2Cache_wCC, desc="L2 Cache Mach to track cache-to-cache transfer (used for miss latency profile)";
- CorePair, desc="Cache Mach (2 cores, Private L1Ds, Shared L1I & L2)";
- TCP, desc="GPU L1 Data Cache (Texture Cache per Pipe)";
- TCC, desc="GPU L2 Shared Cache (Texture Cache per Channel)";
- TCCdir, desc="Directory at the GPU L2 Cache (TCC)";
- SQC, desc="GPU L1 Instr Cache (Sequencer Cache)";
- RegionDir, desc="Region-granular directory";
- RegionBuffer,desc="Region buffer for CPU and GPU";
- NULL, desc="null mach type";
-}
-
-// MessageSizeType
-enumeration(MessageSizeType, desc="...") {
- Control, desc="Control Message";
- Data, desc="Data Message";
- Request_Control, desc="Request";
- Reissue_Control, desc="Reissued request";
- Response_Data, desc="data response";
-  ResponseL2hit_Data,  desc="data response (L2 hit)";
-  ResponseLocal_Data,  desc="data response (local)";
- Response_Control, desc="non-data response";
- Writeback_Data, desc="Writeback data";
- Writeback_Control, desc="Writeback control";
- Broadcast_Control, desc="Broadcast control";
- Multicast_Control, desc="Multicast control";
- Forwarded_Control, desc="Forwarded control";
- Invalidate_Control, desc="Invalidate control";
- Unblock_Control, desc="Unblock control";
- Persistent_Control, desc="Persistent request activation messages";
- Completion_Control, desc="Completion messages";
-}
-
-// AccessType
-enumeration(AccessType, desc="...") {
- Read, desc="Reading from cache";
- Write, desc="Writing to cache";
-}
-
-// RubyAccessMode
-enumeration(RubyAccessMode, default="RubyAccessMode_User", desc="...") {
- Supervisor, desc="Supervisor mode";
- User, desc="User mode";
- Device, desc="Device mode";
-}
-
-enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
- No, desc="No, not a prefetch";
- Yes, desc="Yes, a prefetch";
- L1_HW, desc="This is a L1 hardware prefetch";
- L2_HW, desc="This is a L2 hardware prefetch";
-}
-
-// CacheMsg
-structure(SequencerMsg, desc="...", interface="Message") {
- Addr LineAddress, desc="Line address for this request";
- Addr PhysicalAddress, desc="Physical address for this request";
- SequencerRequestType Type, desc="Type of request (LD, ST, etc)";
- Addr ProgramCounter, desc="Program counter of the instruction that caused the miss";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- DataBlock DataBlk, desc="Data";
- int Len, desc="size in bytes of access";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
- MessageSizeType MessageSize, default="MessageSizeType_Request_Control";
-
- bool functionalRead(Packet *pkt) {
- return testAndRead(PhysicalAddress, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(PhysicalAddress, DataBlk, pkt);
- }
-}
-
-// MaskPredictorType
-enumeration(MaskPredictorType, "MaskPredictorType_Undefined", desc="...") {
- Undefined, desc="Undefined";
- AlwaysUnicast, desc="AlwaysUnicast";
- TokenD, desc="TokenD";
- AlwaysBroadcast, desc="AlwaysBroadcast";
- TokenB, desc="TokenB";
- TokenNull, desc="TokenNull";
- Random, desc="Random";
- Pairwise, desc="Pairwise";
- Owner, desc="Owner";
- BroadcastIfShared, desc="Broadcast-If-Shared";
- BroadcastCounter, desc="Broadcast Counter";
- Group, desc="Group";
- Counter, desc="Counter";
- StickySpatial, desc="StickySpatial";
- OwnerBroadcast, desc="Owner/Broadcast Hybrid";
- OwnerGroup, desc="Owner/Group Hybrid";
- OwnerBroadcastMod, desc="Owner/Broadcast Hybrid-Mod";
- OwnerGroupMod, desc="Owner/Group Hybrid-Mod";
- LastNMasks, desc="Last N Masks";
- BandwidthAdaptive, desc="Bandwidth Adaptive";
-}
-
-// MaskPredictorIndex
-enumeration(MaskPredictorIndex, "MaskPredictorIndex_Undefined", desc="...") {
- Undefined, desc="Undefined";
- DataBlock, desc="Data Block";
- PC, desc="Program Counter";
-}
-
-// MaskPredictorTraining
-enumeration(MaskPredictorTraining, "MaskPredictorTraining_Undefined", desc="...") {
- Undefined, desc="Undefined";
- None, desc="None";
- Implicit, desc="Implicit";
- Explicit, desc="Explicit";
- Both, desc="Both";
-}
-
-// Request Status
-enumeration(RequestStatus, desc="...", default="RequestStatus_NULL") {
- Ready, desc="The sequencer is ready and the request does not alias";
- Issued, desc="The sequencer successfully issued the request";
-  BufferFull,  desc="Cannot issue because the sequencer is full";
- Aliased, desc="This request aliased with a currently outstanding request";
-  NULL,        desc="Invalid status";
-}
-
-// LinkDirection
-enumeration(LinkDirection, desc="...") {
- In, desc="Inward link direction";
- Out, desc="Outward link direction";
-}
+++ /dev/null
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- *
- */
-
-// MemoryRequestType used in MemoryMsg
-
-enumeration(MemoryRequestType, desc="...") {
-
- // Southbound request: from directory to memory cache
- // or directory to memory or memory cache to memory
- MEMORY_READ, desc="Read request to memory";
- MEMORY_WB, desc="Write back data to memory";
-
- // response from memory to directory
- // (These are currently unused!)
- MEMORY_DATA, desc="Data read from memory";
- MEMORY_ACK, desc="Write to memory acknowledgement";
-}
-
-
-// Message to and from Memory Control
-
-structure(MemoryMsg, desc="...", interface="Message") {
- Addr addr, desc="Physical address for this request";
- MemoryRequestType Type, desc="Type of memory request (MEMORY_READ or MEMORY_WB)";
- MachineID Sender, desc="What component sent the data";
- MachineID OriginalRequestorMachId, desc="What component originally requested";
- DataBlock DataBlk, desc="Data to writeback";
- MessageSizeType MessageSize, desc="size category of the message";
- // Not all fields used by all protocols:
- PrefetchBit Prefetch, desc="Is this a prefetch request";
- bool ReadX, desc="Exclusive";
- int Acks, desc="How many acks to expect";
-
- bool functionalRead(Packet *pkt) {
- return testAndRead(addr, DataBlk, pkt);
- }
-
- bool functionalWrite(Packet *pkt) {
- return testAndWrite(addr, DataBlk, pkt);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * Copyright (c) 2013 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// External Types
-
-//
-// **PLEASE NOTE!** When adding objects to this file you must also add a line
-// in the src/mem/ruby/SConscript file. Otherwise the external object's .hh
-// file will not be copied to the protocol directory and you will encounter
-// an undefined declaration error.
-//
-
-external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
-external_type(OutPort, primitive="yes");
-external_type(Scalar, primitive="yes");
-
-structure(InPort, external = "yes", primitive="yes") {
- bool isReady(Tick current_time);
- Tick dequeue(Tick current_time);
- void recycle(Tick current_time, Tick recycle_latency);
- bool isEmpty();
- bool isStallMapEmpty();
- int getStallMapSize();
-}
-
-external_type(NodeID, default="0", primitive="yes");
-external_type(MachineID);
-
-structure (Set, external = "yes", non_obj="yes") {
- void setSize(int);
- void add(NodeID);
- void addSet(Set);
- void remove(NodeID);
- void removeSet(Set);
- void broadcast();
- void addRandom();
- void clear();
- int count();
- bool isElement(NodeID);
- bool isEqual(Set);
- bool isSuperset(Set);
- bool intersectionIsEmpty(Set);
- NodeID smallestElement();
-}
-
-structure (NetDest, external = "yes", non_obj="yes") {
- void setSize(int);
- void setSize(int, int);
- void add(NodeID);
- void add(MachineID);
- void addSet(Set);
- void addNetDest(NetDest);
- void setNetDest(MachineType, Set);
- void remove(NodeID);
- void remove(MachineID);
- void removeSet(Set);
- void removeNetDest(NetDest);
- void broadcast();
- void broadcast(MachineType);
- void addRandom();
- void clear();
- Set toSet();
- int count();
- bool isElement(NodeID);
- bool isElement(MachineID);
- bool isSuperset(Set);
- bool isSuperset(NetDest);
- bool isEmpty();
- bool intersectionIsEmpty(Set);
- bool intersectionIsEmpty(NetDest);
- MachineID smallestElement(MachineType);
- NetDest OR(NetDest);
- NetDest AND(NetDest);
-}
-
-structure (Sequencer, external = "yes") {
- void readCallback(Addr, DataBlock);
- void readCallback(Addr, DataBlock, bool);
- void readCallback(Addr, DataBlock, bool, MachineType);
- void readCallback(Addr, DataBlock, bool, MachineType,
- Cycles, Cycles, Cycles);
-
- void writeCallback(Addr, DataBlock);
- void writeCallback(Addr, DataBlock, bool);
- void writeCallback(Addr, DataBlock, bool, MachineType);
- void writeCallback(Addr, DataBlock, bool, MachineType,
- Cycles, Cycles, Cycles);
-
- void checkCoherence(Addr);
- void evictionCallback(Addr);
- void recordRequestType(SequencerRequestType);
- bool checkResourceAvailable(CacheResourceType, Addr);
- void invalidateSC(Addr);
-}
-
-structure (GPUCoalescer, external = "yes") {
- void readCallback(Addr, DataBlock);
- void readCallback(Addr, MachineType, DataBlock);
- void readCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles);
- void readCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles, bool);
- void writeCallback(Addr, DataBlock);
- void writeCallback(Addr, MachineType, DataBlock);
- void writeCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles);
- void writeCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles, bool);
- void checkCoherence(Addr);
- void evictionCallback(Addr);
- void recordCPReadCallBack(MachineID, MachineID);
- void recordCPWriteCallBack(MachineID, MachineID);
-}
-
-structure (VIPERCoalescer, external = "yes") {
- void readCallback(Addr, DataBlock);
- void readCallback(Addr, MachineType, DataBlock);
- void readCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles);
- void readCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles, bool);
- void writeCallback(Addr, DataBlock);
- void writeCallback(Addr, MachineType, DataBlock);
- void writeCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles);
- void writeCallback(Addr, MachineType, DataBlock,
- Cycles, Cycles, Cycles, bool);
- void invCallback(Addr);
- void wbCallback(Addr);
- void checkCoherence(Addr);
- void evictionCallback(Addr);
-}
-
-structure(RubyRequest, desc="...", interface="Message", external="yes") {
- Addr LineAddress, desc="Line address for this request";
- Addr PhysicalAddress, desc="Physical address for this request";
- RubyRequestType Type, desc="Type of request (LD, ST, etc)";
- Addr ProgramCounter, desc="Program counter of the instruction that caused the miss";
- RubyAccessMode AccessMode, desc="user/supervisor access type";
- int Size, desc="size in bytes of access";
- PrefetchBit Prefetch, desc="Is this a prefetch request";
-  int contextId,          desc="this goes away but must be replaced with Nilay";
- WriteMask writeMask, desc="Writethrough mask";
- DataBlock WTData, desc="Writethrough data block";
- int wfid, desc="Writethrough wavefront";
- HSAScope scope, desc="HSA scope";
- HSASegment segment, desc="HSA segment";
- PacketPtr pkt, desc="Packet associated with this request";
-}
-
-structure(AbstractEntry, primitive="yes", external = "yes") {
- void changePermission(AccessPermission);
-}
-
-structure (DirectoryMemory, external = "yes") {
- AbstractEntry allocate(Addr, AbstractEntry);
- AbstractEntry lookup(Addr);
- bool isPresent(Addr);
- void invalidateBlock(Addr);
- void recordRequestType(DirectoryRequestType);
-}
-
-structure(AbstractCacheEntry, primitive="yes", external = "yes") {
- void changePermission(AccessPermission);
-}
-
-structure (CacheMemory, external = "yes") {
- bool cacheAvail(Addr);
- Addr cacheProbe(Addr);
- AbstractCacheEntry allocate(Addr, AbstractCacheEntry);
- AbstractCacheEntry allocate(Addr, AbstractCacheEntry, bool);
- void allocateVoid(Addr, AbstractCacheEntry);
- void deallocate(Addr);
- AbstractCacheEntry lookup(Addr);
- bool isTagPresent(Addr);
- Cycles getTagLatency();
- Cycles getDataLatency();
- void setMRU(Addr);
- void setMRU(Addr, int);
- void setMRU(AbstractCacheEntry);
- void recordRequestType(CacheRequestType, Addr);
- bool checkResourceAvailable(CacheResourceType, Addr);
-
- int getCacheSize();
- int getNumBlocks();
- Addr getAddressAtIdx(int);
-
- Scalar demand_misses;
- Scalar demand_hits;
-}
-
-structure (WireBuffer, inport="yes", outport="yes", external = "yes") {
-
-}
-
-structure (DMASequencer, external = "yes") {
- void ackCallback(Addr);
- void dataCallback(DataBlock,Addr);
- void recordRequestType(CacheRequestType);
-}
-
-structure (TimerTable, inport="yes", external = "yes") {
- bool isReady(Tick);
- Addr nextAddress();
- void set(Addr, Tick);
- void unset(Addr);
- bool isSet(Addr);
-}
-
-structure (AbstractBloomFilter, external = "yes") {
- void clear(int);
- void set(Addr, int);
- void unset(Addr, int);
-
- bool isSet(Addr, int);
- int getCount(Addr, int);
-}
-
-structure (Prefetcher, external = "yes") {
- void observeMiss(Addr, RubyRequestType);
- void observePfHit(Addr);
- void observePfMiss(Addr);
-}
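
TBETable is declared external above (and used by the DMA controller earlier) with a four-call interface: lookup, allocate, deallocate, isPresent. A minimal C++ sketch of that interface over a hash map (the real gem5 class is a template parameterized on the TBE type; this toy fixes one):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    using Addr = std::uint64_t;
    struct TBE { int state = 0; };  // stand-in for a transient request record

    class TBETable {
        std::unordered_map<Addr, TBE> entries;
      public:
        bool isPresent(Addr a) const { return entries.count(a) != 0; }
        void allocate(Addr a) { assert(!isPresent(a)); entries.emplace(a, TBE{}); }
        void deallocate(Addr a) { entries.erase(a); }
        TBE &lookup(Addr a) { return entries.at(a); }  // callers check isPresent
    };

    int main()
    {
        TBETable tbes;
        tbes.allocate(0x40);
        tbes.lookup(0x40).state = 1;
        tbes.deallocate(0x40);
    }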
+++ /dev/null
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// Miscellaneous Functions
-
-void error(std::string msg);
-void assert(bool condition);
-int random(int number);
-Cycles zero_time();
-NodeID intToID(int nodenum);
-int IDToInt(NodeID id);
-int addressToInt(Addr addr);
-Addr intToAddress(int addr);
-void procProfileCoherenceRequest(NodeID node, bool needCLB);
-void dirProfileCoherenceRequest(NodeID node, bool needCLB);
-int max_tokens();
-Addr setOffset(Addr addr, int offset);
-Addr makeLineAddress(Addr addr);
-int getOffset(Addr addr);
-int mod(int val, int mod);
-Addr bitSelect(Addr addr, int small, int big);
-Addr maskLowOrderBits(Addr addr, int number);
-Addr makeNextStrideAddress(Addr addr, int stride);
-structure(BoolVec, external="yes") {
-}
-int countBoolVec(BoolVec bVec);
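
The address helpers declared above have conventional bit-manipulation meanings. Plausible C++ implementations, assuming a 64-byte line (6 offset bits); illustrative sketches, not the gem5 source:

    #include <cstdint>

    using Addr = std::uint64_t;
    constexpr int kBlockBits = 6;  // assumed 64-byte cache lines

    // Clear the offset bits: the start address of the containing line.
    Addr makeLineAddress(Addr a) { return a & ~((Addr(1) << kBlockBits) - 1); }

    // Byte offset within the line.
    int getOffset(Addr a) { return int(a & ((Addr(1) << kBlockBits) - 1)); }

    // Extract bits [small, big] inclusive, right-justified.
    Addr bitSelect(Addr a, int small, int big)
    {
        return (a >> small) & ((Addr(1) << (big - small + 1)) - 1);
    }

    // Advance by `stride` cache lines.
    Addr makeNextStrideAddress(Addr a, int stride)
    {
        return a + (Addr(stride) << kBlockBits);
    }

    int main() { return getOffset(makeLineAddress(0x12345)); }  // always 0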
+++ /dev/null
-include "RubySlicc_Exports.sm";
-include "RubySlicc_Types.sm";
-include "RubySlicc_Util.sm";
-include "RubySlicc_ComponentMapping.sm";
-include "RubySlicc_Defines.sm";
-include "RubySlicc_MemControl.sm";
+++ /dev/null
-# -*- mode:python -*-
-
-# Copyright (c) 2009 The Hewlett-Packard Development Company
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Nathan Binkert
-
-import os
-import re
-import sys
-
-from os.path import isdir, isfile, join as joinpath
-
-from SCons.Scanner import Classic
-
-from gem5_scons import Transform
-
-Import('*')
-
-if env['PROTOCOL'] == 'None':
- Return()
-
-output_dir = Dir('.')
-html_dir = Dir('html')
-slicc_dir = Dir('../slicc')
-
-sys.path[1:1] = [ Dir('..').srcnode().abspath ]
-from slicc.parser import SLICC
-
-slicc_depends = []
-for root,dirs,files in os.walk(slicc_dir.srcnode().abspath):
- for f in files:
- if f.endswith('.py'):
- slicc_depends.append(File(joinpath(root, f)))
-
-#
-# Use SLICC
-#
-env["SLICC_PATH"] = protocol_dirs
-slicc_scanner = Classic("SliccScanner", ['.sm', '.slicc'], "SLICC_PATH",
- r'''include[ \t]["'](.*)["'];''')
-env.Append(SCANNERS=slicc_scanner)
-
-def slicc_emitter(target, source, env):
- assert len(source) == 1
- filepath = source[0].srcnode().abspath
-
- slicc = SLICC(filepath, protocol_base.abspath, verbose=False)
- slicc.process()
- slicc.writeCodeFiles(output_dir.abspath, slicc_includes)
- if env['SLICC_HTML']:
- slicc.writeHTMLFiles(html_dir.abspath)
-
- target.extend([output_dir.File(f) for f in sorted(slicc.files())])
- return target, source
-
-def slicc_action(target, source, env):
- assert len(source) == 1
- filepath = source[0].srcnode().abspath
-
- slicc = SLICC(filepath, protocol_base.abspath, verbose=True)
- slicc.process()
- slicc.writeCodeFiles(output_dir.abspath, slicc_includes)
- if env['SLICC_HTML']:
- slicc.writeHTMLFiles(html_dir.abspath)
-
-slicc_builder = Builder(action=MakeAction(slicc_action, Transform("SLICC")),
- emitter=slicc_emitter)
-
-protocol = env['PROTOCOL']
-protocol_dir = None
-for path in protocol_dirs:
- if os.path.exists(os.path.join(path, "%s.slicc" % protocol)):
- protocol_dir = Dir(path)
- break
-
-if not protocol_dir:
- raise ValueError, "Could not find %s.slicc in protocol_dirs" % protocol
-
-sources = [ protocol_dir.File("%s.slicc" % protocol) ]
-
-env.Append(BUILDERS={'SLICC' : slicc_builder})
-nodes = env.SLICC([], sources)
-env.Depends(nodes, slicc_depends)
-
-for f in nodes:
- s = str(f)
- if s.endswith('.cc'):
- Source(f)
- elif s.endswith('.py'):
- SimObject(f)
-
+++ /dev/null
-# -*- mode:python -*-
-
-# Copyright (c) 2009 The Hewlett-Packard Development Company
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Nathan Binkert
-
-import os
-
-Import('*')
-
-all_protocols.extend([
- 'GPU_VIPER',
- 'GPU_VIPER_Baseline',
- 'GPU_VIPER_Region',
- 'GPU_RfO',
- 'MOESI_AMD_Base',
- 'MESI_Two_Level',
- 'MESI_Three_Level',
- 'MI_example',
- 'MOESI_CMP_directory',
- 'MOESI_CMP_token',
- 'MOESI_hammer',
- 'Garnet_standalone',
- 'None'
- ])
-
-opt = BoolVariable('SLICC_HTML', 'Create HTML files', False)
-sticky_vars.AddVariables(opt)
-
-protocol_dirs.append(Dir('.').abspath)
-
-protocol_base = Dir('.')
-Export('protocol_base')
-
-slicc_includes.append('mem/ruby/slicc_interface/RubySlicc_includes.hh')
#
# Link includes
#
-generated_dir = Dir('../protocol')
+generated_dir = Dir('protocol')
def MakeIncludeAction(target, source, env):
f = file(str(target[0]), 'w')
MakeInclude('system/DMASequencer.hh')
MakeInclude('system/Sequencer.hh')
-# External types : Group "mem/protocol" : include "header.hh" to the bottom
-# of this MakeIncludes if it is referenced as
-# <# include "mem/protocol/header.hh"> in any file
-# generated_dir = Dir('../protocol')
+# External types : Group "mem/ruby/protocol" : include "header.hh" to the
+# bottom of this MakeIncludes if it is referenced as
+# <# include "mem/ruby/protocol/header.hh"> in any file
+# generated_dir = Dir('protocol')
MakeInclude('system/GPUCoalescer.hh')
MakeInclude('system/VIPERCoalescer.hh')
#include <string>
#include "base/cprintf.hh"
-#include "mem/protocol/MachineType.hh"
+#include "mem/ruby/protocol/MachineType.hh"
struct MachineID
{
#include "base/types.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
-#include "mem/protocol/LinkDirection.hh"
-#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/common/MachineID.hh"
#include "mem/ruby/common/TypeDefines.hh"
#include "mem/ruby/network/Topology.hh"
#include "mem/ruby/network/dummy_port.hh"
+#include "mem/ruby/protocol/LinkDirection.hh"
+#include "mem/ruby/protocol/MessageSizeType.hh"
#include "params/RubyNetwork.hh"
#include "sim/clocked_object.hh"
#include <string>
#include <vector>
-#include "mem/protocol/LinkDirection.hh"
#include "mem/ruby/common/TypeDefines.hh"
#include "mem/ruby/network/BasicLink.hh"
+#include "mem/ruby/protocol/LinkDirection.hh"
class NetDest;
class Network;
#include <vector>
#include "mem/packet.hh"
-#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/common/TypeDefines.hh"
#include "mem/ruby/network/BasicRouter.hh"
+#include "mem/ruby/protocol/MessageSizeType.hh"
#include "params/Switch.hh"
class MessageBuffer;
#include <iostream>
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Set.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
class Histogram;
#include <vector>
#include "base/stl_helpers.hh"
-#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/protocol/RubyRequest.hh"
using namespace std;
typedef AddressProfiler::AddressMap AddressMap;
#include <iostream>
#include <unordered_map>
-#include "mem/protocol/AccessType.hh"
-#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/profiler/AccessTraceForAddress.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/protocol/AccessType.hh"
+#include "mem/ruby/protocol/RubyRequest.hh"
class Set;
#include "base/stl_helpers.hh"
#include "base/str.hh"
-#include "mem/protocol/MachineType.hh"
-#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/network/Network.hh"
#include "mem/ruby/profiler/AddressProfiler.hh"
+#include "mem/ruby/protocol/MachineType.hh"
+#include "mem/ruby/protocol/RubyRequest.hh"
/**
* the profiler uses GPUCoalescer code even
*/
#ifdef BUILD_GPU
#include "mem/ruby/system/GPUCoalescer.hh"
+
#endif
#include "mem/ruby/system/Sequencer.hh"
#include "base/callback.hh"
#include "base/statistics.hh"
-#include "mem/protocol/AccessType.hh"
-#include "mem/protocol/PrefetchBit.hh"
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/MachineID.hh"
+#include "mem/ruby/protocol/AccessType.hh"
+#include "mem/ruby/protocol/PrefetchBit.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
#include "params/RubySystem.hh"
class RubyRequest;
--- /dev/null
+/*
+ * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
+ : Sequencer* sequencer;
+ CacheMemory * L1cache;
+ int TCC_select_num_bits;
+ Cycles issue_latency := 80; // time to send data down to TCC
+ Cycles l2_hit_latency := 18;
+
+ MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseFromSQC, network="To", virtual_network="3", vnet_type="response";
+ MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
+
+ MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";
+
+ MessageBuffer * mandatoryQueue;
+{
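+
+ // Overview: the SQC is an instruction cache, so the only core-initiated
+ // event is Fetch and a block is never written locally. That keeps the
+ // stable states to I and S, plus three transient states covering fills,
+ // replacements, and cancelled writebacks.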
+ state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
+ I, AccessPermission:Invalid, desc="Invalid";
+ S, AccessPermission:Read_Only, desc="Shared";
+
+ I_S, AccessPermission:Busy, desc="Invalid, issued RdBlkS, have not seen response yet";
+ S_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for clean WB ack";
+ I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from TCCdir for canceled WB";
+ }
+
+ enumeration(Event, desc="SQC Events") {
+ // Core initiated
+ Fetch, desc="Fetch";
+
+ //TCC initiated
+ TCC_AckS, desc="TCC Ack to Core Request";
+ TCC_AckWB, desc="TCC Ack for WB";
+ TCC_NackWB, desc="TCC Nack for WB";
+
+ // Mem sys initiated
+ Repl, desc="Replacing block from cache";
+
+ // Probe Events
+ PrbInvData, desc="probe, return M data";
+ PrbInv, desc="probe, no need for data";
+ PrbShrData, desc="probe downgrade, return data";
+ }
+
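+ // RequestType exists for stats and resource modeling only: each
+ // transition tags the tag/data array accesses it performs, and
+ // recordRequestType/checkResourceAvailable below translate those tags
+ // into CacheMemory bookkeeping.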
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+ TagArrayRead, desc="Read the data array";
+ TagArrayWrite, desc="Write the data array";
+ }
+
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (diff than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Shared, desc="Victim hit by shared probe";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
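+ // The TBETable is provided by C++ (external="yes") and acts as the
+ // MSHR-like pool of transaction buffer entries; in this machine a TBE
+ // is live only while a victim writeback is outstanding.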
+ TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ // Internal functions
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
+ return cache_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return SQC_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return SQC_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(SQC_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ // Out Ports
+
+ out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);
+ out_port(responseNetwork_out, ResponseMsg, responseFromSQC);
+ out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
+
+ // In Ports
+
+ in_port(probeNetwork_in, TDProbeRequestMsg, probeToSQC) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, TDProbeRequestMsg, block_on="addr") {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ assert(in_msg.ReturnData);
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
+ if (responseToSQC_in.isReady(clockEdge())) {
+ peek(responseToSQC_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == CoherenceResponseType:TDSysResp) {
+ if (in_msg.State == CoherenceState:Shared) {
+ trigger(Event:TCC_AckS, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("SQC should not receive TDSysResp other than CoherenceState:Shared");
+ }
+ } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
+ trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:TDSysWBNack) {
+ trigger(Event:TCC_NackWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
+
+ assert(in_msg.Type == RubyRequestType:IFETCH);
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
+ trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ }
+ }
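+
+ // Note: SLICC gives in_ports a fixed relative priority based on their
+ // declaration order, so probes, TCC responses, and core fetches are
+ // serviced in a deterministic order.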
+
+ // Actions
+
+ action(ic_invCache, "ic", desc="invalidate cache") {
+ if(is_valid(cache_entry)) {
+ L1cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(vc_victim, "vc", desc="Victimize E/S Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:S) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ action(a_allocate, "a", desc="allocate block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1cache.allocate(address, new Entry));
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.Shared := false;
+ }
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
+ responseToSQC_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(l_loadDone, "l", desc="local load done") {
+ assert(is_valid(cache_entry));
+ sequencer.readCallback(address, cache_entry.DataBlk,
+ false, MachineType:L1Cache);
+ APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
+ }
+
+ action(xl_loadDone, "xl", desc="remote load done") {
+ peek(responseToSQC_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ sequencer.readCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
+ }
+ }
+
+ action(w_writeCache, "w", desc="write data to cache") {
+ peek(responseToSQC_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
+ peek(responseToSQC_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:StaleNotif;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(wb_data, "wb", desc="write back data") {
+ peek(responseToSQC_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Ntsl := true;
+ out_msg.Hit := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false; // only true if sending back data, I think
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry) || is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := getDataBlock(address);
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else {
+ out_msg.Dirty := cache_entry.Dirty;
+ }
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ // A valid cache entry is required here; this also implies the weaker
+ // entry-or-TBE check used by the other probe-response actions.
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := getDataBlock(address);
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else {
+ out_msg.Dirty := cache_entry.Dirty;
+ }
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
+ assert(is_valid(tbe));
+ tbe.Shared := true;
+ }
+
+ action(uu_sendUnblock, "uu", desc="state changed, unblock") {
+ enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
+ probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ // Transitions
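+ //
+ // Each transition names its (initial state, triggering event[, next
+ // state]), followed by the set of cache-array resources it consumes and
+ // the ordered list of actions it executes.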
+
+ // transitions from base
+ transition(I, Fetch, I_S) {TagArrayRead, TagArrayWrite} {
+ a_allocate;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ // simple hit transitions
+ transition(S, Fetch) {TagArrayRead, DataArrayRead} {
+ l_loadDone;
+ p_popMandatoryQueue;
+ }
+
+ // recycles from transients
+ transition({I_S, S_I, I_C}, {Fetch, Repl}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition(S, Repl, S_I) {TagArrayRead} {
+ t_allocateTBE;
+ vc_victim;
+ ic_invCache;
+ }
+
+ // TCC event
+ transition(I_S, TCC_AckS, S) {DataArrayRead, DataArrayWrite} {
+ w_writeCache;
+ xl_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(S_I, TCC_NackWB, I){TagArrayWrite} {
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(S_I, TCC_AckWB, I) {TagArrayWrite} {
+ wb_data;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(I_C, TCC_AckWB, I){TagArrayWrite} {
+ ss_sendStaleNotification;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(I_C, TCC_NackWB, I) {TagArrayWrite} {
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ // Probe transitions
+ transition({S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInvData, I_C) {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition({S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition({S}, PrbShrData, S) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({I, I_C}, PrbShrData) {TagArrayRead} {
+ prm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInv, I_C){
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_S, {PrbInv, PrbInvData}) {} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ a_allocate; // but make sure there is room for incoming data when it arrives
+ pp_popProbeQueue;
+ }
+
+ transition(I_S, PrbShrData) {} {
+ prm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(S_I, PrbInvData, I_C) {TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(S_I, PrbInv, I_C) {TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(S_I, PrbShrData) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ sf_setSharedFlip;
+ pp_popProbeQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:TCC, "TCC Cache")
+ : CacheMemory * L2cache;
+ WireBuffer * w_reqToTCCDir;
+ WireBuffer * w_respToTCCDir;
+ WireBuffer * w_TCCUnblockToTCCDir;
+ WireBuffer * w_reqToTCC;
+ WireBuffer * w_probeToTCC;
+ WireBuffer * w_respToTCC;
+ int TCC_select_num_bits;
+ Cycles l2_request_latency := 1;
+ Cycles l2_response_latency := 20;
+
+ // To the general response network
+ MessageBuffer * responseFromTCC, network="To", virtual_network="3", vnet_type="response";
+
+ // From the general response network
+ MessageBuffer * responseToTCC, network="From", virtual_network="3", vnet_type="response";
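+
+ // The w_* WireBuffers above model direct wires between this TCC and its
+ // co-located TCCdir, while the two MessageBuffers use the general
+ // response network.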
+
+{
+ // EVENTS
+ enumeration(Event, desc="TCC Events") {
+ // Requests coming from the Cores
+ RdBlk, desc="CPU RdBlk event";
+ RdBlkM, desc="CPU RdBlkM event";
+ RdBlkS, desc="CPU RdBlkS event";
+ CtoD, desc="Change to Dirty request";
+ WrVicBlk, desc="L1 Victim (dirty)";
+ WrVicBlkShared, desc="L1 Victim (dirty), shared";
+ ClVicBlk, desc="L1 Victim (clean)";
+ ClVicBlkShared, desc="L1 Victim (clean), shared";
+
+ CPUData, desc="WB data from CPU";
+ CPUDataShared, desc="WB data from CPU, NBReqShared 1";
+ StaleWB, desc="Stale WB, no data";
+
+ L2_Repl, desc="L2 Replacement";
+
+ // Probes
+ PrbInvData, desc="Invalidating probe, return dirty data";
+ PrbInv, desc="Invalidating probe, no need to return data";
+ PrbShrData, desc="Downgrading probe, return data";
+
+ // Coming from Memory Controller
+ WBAck, desc="ack from memory";
+
+ CancelWB, desc="Cancel WB from L2";
+ }
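+
+ // Victim writebacks use a split handshake: the L1 sends a (Wr|Cl)VicBlk
+ // request, the TCC acks it with TDSysWBAck, and the L1 then answers with
+ // CPUData (possibly NbReqShared) or with a StaleNotif, which arrives
+ // here as StaleWB, if its copy was invalidated in the meantime.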
+
+ // STATES
+ state_declaration(State, desc="TCC State", default="TCC_State_I") {
+ M, AccessPermission:Read_Write, desc="Modified"; // No other cache has copy, memory stale
+ O, AccessPermission:Read_Only, desc="Owned"; // Correct most recent copy, others may exist in S
+ E, AccessPermission:Read_Write, desc="Exclusive"; // Correct, most recent, and only copy (and == Memory)
+ S, AccessPermission:Read_Only, desc="Shared"; // Correct, most recent. If no one in O, then == Memory
+ I, AccessPermission:Invalid, desc="Invalid";
+
+ I_M, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
+ I_O, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
+ I_E, AccessPermission:Busy, desc="Invalid, received ClVicBlk, sent Ack, waiting for Data";
+ I_S, AccessPermission:Busy, desc="Invalid, received ClVicBlk, sent Ack, waiting for Data";
+ S_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
+ S_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
+ S_E, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to E";
+ S_S, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to S";
+ E_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
+ E_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
+ E_E, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to E";
+ E_S, AccessPermission:Busy, desc="Exclusive, received ClVicBlkShared, sent Ack, waiting for Data, then go to S";
+ O_M, AccessPermission:Busy, desc="...";
+ O_O, AccessPermission:Busy, desc="...";
+ O_E, AccessPermission:Busy, desc="...";
+ M_M, AccessPermission:Busy, desc="...";
+ M_O, AccessPermission:Busy, desc="...";
+ M_E, AccessPermission:Busy, desc="...";
+ M_S, AccessPermission:Busy, desc="...";
+ D_I, AccessPermission:Invalid, desc="drop WB data on the floor when receive";
+ MOD_I, AccessPermission:Busy, desc="drop WB data on the floor, waiting for WBAck from Mem";
+ MO_I, AccessPermission:Busy, desc="M or O, received L2_Repl, waiting for WBAck from Mem";
+ ES_I, AccessPermission:Busy, desc="E or S, received L2_Repl, waiting for WBAck from Mem";
+ I_C, AccessPermission:Invalid, desc="sent cancel, just waiting to receive mem wb ack so nothing gets confused";
+ }
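+
+ // The transient states follow an X_Y convention: the block was in X when
+ // the victim request arrived and settles in Y once the matching CPUData
+ // (or a StaleWB notification) is received.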
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+ TagArrayRead, desc="Read the data array";
+ TagArrayWrite, desc="Write the data array";
+ }
+
+
+ // STRUCTURES
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (diff from memory?)";
+ DataBlock DataBlk, desc="Data for the block";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ bool Shared, desc="Victim hit by shared probe";
+ MachineID From, desc="Waiting for writeback from...";
+ }
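+
+ // TBE.From tracks which L1 this TCC still expects a writeback from;
+ // f_setFrom/rf_resetFrom below maintain it, with the TCC's own machineID
+ // serving as the "none pending" sentinel.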
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+
+
+ // FUNCTION DEFINITIONS
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L2cache.lookup(addr));
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ return getCacheEntry(addr).DataBlk;
+ }
+
+ bool presentOrAvail(Addr addr) {
+ return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return TCC_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return TCC_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(TCC_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+
+
+ // OUT PORTS
+ out_port(w_requestNetwork_out, CPURequestMsg, w_reqToTCCDir);
+ out_port(w_TCCResp_out, ResponseMsg, w_respToTCCDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromTCC);
+ out_port(w_unblockNetwork_out, UnblockMsg, w_TCCUnblockToTCCDir);
+
+ // IN PORTS
+ in_port(TDResponse_in, ResponseMsg, w_respToTCC) {
+ if (TDResponse_in.isReady(clockEdge())) {
+ peek(TDResponse_in, ResponseMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
+ trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ error("Error on TDResponse Type");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseNetwork_in, ResponseMsg, responseToTCC) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:CPUData) {
+ if (in_msg.NbReqShared) {
+ trigger(Event:CPUDataShared, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:CPUData, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
+ trigger(Event:StaleWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ error("Error on TDResponse Type");
+ }
+ }
+ }
+ }
+
+ // probe network
+ in_port(probeNetwork_in, TDProbeRequestMsg, w_probeToTCC) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, TDProbeRequestMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Don't think I should get any of these");
+ }
+ }
+ }
+ }
+ }
+
+ // Request Network
+ in_port(requestNetwork_in, CPURequestMsg, w_reqToTCC) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (presentOrAvail(in_msg.addr)) {
+ if (in_msg.Shared) {
+ trigger(Event:ClVicBlkShared, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:ClVicBlk, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.addr);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (presentOrAvail(in_msg.addr)) {
+ if (in_msg.Shared) {
+ trigger(Event:WrVicBlkShared, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.addr);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+ }
+ }
+ }
+
+ // BEGIN ACTIONS
+
+ action(i_invL2, "i", desc="invalidate TCC cache block") {
+ if (is_valid(cache_entry)) {
+ L2cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(rm_sendResponseM, "rm", desc="send Modified response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(rs_sendResponseS, "rs", desc="send Shared response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.State := CoherenceState:Shared;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+
+ action(r_requestToTD, "r", desc="Miss in L2, pass on") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Shared := false; // unneeded for this request
+ out_msg.MessageSize := in_msg.MessageSize;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ if (is_valid(cache_entry)) {
+ tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
+ tbe.Dirty := cache_entry.Dirty;
+ }
+ tbe.From := machineID;
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(vc_vicClean, "vc", desc="Victimize Clean L2 data") {
+ enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(vd_vicDirty, "vd", desc="Victimize dirty L2 data") {
+ enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ph_sendProbeResponseHit, "ph", desc="send probe ack, no data") {
+ enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Hit := true;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pm_sendProbeResponseMiss, "pm", desc="send probe ack, no data") {
+ enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty); not needed, since the TCC can also supply clean data
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
+ enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := tbe.DataBlk;
+ //assert(tbe.Dirty);
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Hit := true;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.State := CoherenceState:NA;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(mc_cancelMemWriteback, "mc", desc="send writeback cancel to memory") {
+ enqueue(w_requestNetwork_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WrCancel;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(a_allocateBlock, "a", desc="allocate TCC block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+ }
+
+ action(d_writeData, "d", desc="write data to TCC") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ cache_entry.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
+ }
+ }
+
+ action(rd_copyDataFromRequest, "rd", desc="write data to TCC") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := true;
+ }
+ }
+
+ action(f_setFrom, "f", desc="set who WB is expected to come from") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ tbe.From := in_msg.Requestor;
+ }
+ }
+
+ action(rf_resetFrom, "rf", desc="reset From") {
+ tbe.From := machineID;
+ }
+
+ action(wb_data, "wb", desc="write back data") {
+ enqueue(w_TCCResp_out, ResponseMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(wt_writeDataToTBE, "wt", desc="write WB data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(uo_sendUnblockOwner, "uo", desc="state changed to E, M, or O, unblock") {
+ enqueue(w_unblockNetwork_out, UnblockMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ out_msg.currentOwner := true;
+ out_msg.valid := true;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(us_sendUnblockSharer, "us", desc="state changed to S, unblock") {
+ enqueue(w_unblockNetwork_out, UnblockMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ out_msg.currentOwner := false;
+ out_msg.valid := true;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(un_sendUnblockNotValid, "un", desc="state changed to I, unblock") {
+ enqueue(w_unblockNetwork_out, UnblockMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ out_msg.currentOwner := false;
+ out_msg.valid := false;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
+ L2cache.setMRU(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pn_popTDResponseQueue, "pn", desc="pop TD response queue") {
+ TDResponse_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(zz_recycleRequestQueue, "\z", desc="recycle request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+
+ // END ACTIONS
+
+ // BEGIN TRANSITIONS
+
+ // transitions from base
+
+ transition({I, I_C}, {RdBlk, RdBlkS, RdBlkM, CtoD}){TagArrayRead} {
+ // TCCdir already knows that the block is not here. This is to allocate and get the block.
+ r_requestToTD;
+ p_popRequestQueue;
+ }
+
+// check
+ transition({M, O}, RdBlk, O){TagArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ // detect 2nd chancing
+ p_popRequestQueue;
+ }
+
+//check
+ transition({E, S}, RdBlk, S){TagArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ // detect 2nd chancing
+ p_popRequestQueue;
+ }
+
+// check
+ transition({M, O}, RdBlkS, O){TagArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ // detect 2nd chance sharing
+ p_popRequestQueue;
+ }
+
+//check
+ transition({E, S}, RdBlkS, S){TagArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ // detect 2nd chance sharing
+ p_popRequestQueue;
+ }
+
+// check
+ transition(M, RdBlkM, I){TagArrayRead, TagArrayWrite} {
+ rm_sendResponseM;
+ i_invL2;
+ p_popRequestQueue;
+ }
+
+ //check
+ transition(E, RdBlkM, I){TagArrayRead, TagArrayWrite} {
+ rm_sendResponseM;
+ i_invL2;
+ p_popRequestQueue;
+ }
+
+// check
+ transition({I}, WrVicBlk, I_M){TagArrayRead} {
+ a_allocateBlock;
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(I_C, {WrVicBlk, WrVicBlkShared, ClVicBlk, ClVicBlkShared}) {
+ zz_recycleRequestQueue;
+ }
+
+//check
+ transition({I}, WrVicBlkShared, I_O) {TagArrayRead}{
+ a_allocateBlock;
+ t_allocateTBE;
+ f_setFrom;
+// rd_copyDataFromRequest;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+//check
+ transition(S, WrVicBlkShared, S_O){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
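+
+// "Stale writeback" in the comments below means the directory has already
+// downgraded or invalidated the block relative to what the victimizing L1
+// believed; the TCC still completes the ack handshake, and the eventual
+// CPUData or StaleWB from the L1 resolves the transient state.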
+
+// a stale writeback
+ transition(S, WrVicBlk, S_S){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(E, WrVicBlk, E_E){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(E, WrVicBlkShared, E_E){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(O, WrVicBlk, O_O){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(O, WrVicBlkShared, O_O){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(M, WrVicBlk, M_M){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(M, WrVicBlkShared, M_O){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+//check
+ transition({I}, ClVicBlk, I_E){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ a_allocateBlock;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition({I}, ClVicBlkShared, I_S){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ a_allocateBlock;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+//check
+ transition(S, ClVicBlkShared, S_S){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(E, ClVicBlk, E_E){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(E, ClVicBlkShared, E_S){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(O, ClVicBlk, O_O){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// check. The original L3 had it going from O to O_S; a block can go from O to S only on a writeback.
+ transition(O, ClVicBlkShared, O_O){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(M, ClVicBlk, M_E){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+// a stale writeback
+ transition(M, ClVicBlkShared, M_S){TagArrayRead} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+
+ transition({MO_I}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {
+ a_allocateBlock;
+ t_allocateTBE;
+ f_setFrom;
+ r_requestToTD;
+ p_popRequestQueue;
+ }
+
+ transition(MO_I, {WrVicBlkShared, WrVicBlk, ClVicBlk, ClVicBlkShared}, MOD_I) {
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(I_M, CPUData, M){TagArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M, CPUDataShared, O){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_O, {CPUData, CPUDataShared}, O){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E, CPUData, E){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E, CPUDataShared, S){TagArrayWrite, DataArrayWrite} {
+ us_sendUnblockSharer;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_S, {CPUData, CPUDataShared}, S){TagArrayWrite, DataArrayWrite} {
+ us_sendUnblockSharer;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(S_M, CPUDataShared, O){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(S_O, {CPUData, CPUDataShared}, O){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(S_E, CPUDataShared, S){TagArrayWrite, DataArrayWrite} {
+ us_sendUnblockSharer;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(S_S, {CPUData, CPUDataShared}, S){TagArrayWrite, DataArrayWrite} {
+ us_sendUnblockSharer;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(O_E, CPUDataShared, O){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(O_O, {CPUData, CPUDataShared}, O){TagArrayWrite, DataArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition({D_I}, {CPUData, CPUDataShared}, I){TagArrayWrite} {
+ un_sendUnblockNotValid;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(MOD_I, {CPUData, CPUDataShared}, MO_I) {
+ un_sendUnblockNotValid;
+ rf_resetFrom;
+ pr_popResponseQueue;
+ }
+
+ transition({O,S,I}, CPUData) {
+ pr_popResponseQueue;
+ }
+
+ transition({M, O}, L2_Repl, MO_I){TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ vd_vicDirty;
+ i_invL2;
+ }
+
+ transition({E, S}, L2_Repl, ES_I){TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ vc_vicClean;
+ i_invL2;
+ }
+
+ transition({I_M, I_O, S_M, S_O, E_M, E_O}, L2_Repl) {
+ zz_recycleRequestQueue;
+ }
+
+ transition({O_M, O_O, O_E, M_M, M_O, M_E, M_S}, L2_Repl) {
+ zz_recycleRequestQueue;
+ }
+
+ transition({I_E, I_S, S_E, S_S, E_E, E_S}, L2_Repl) {
+ zz_recycleRequestQueue;
+ }
+
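+// Probes from the NB: invalidating probes return dirty data and invalidate
+// the L2 copy, downgrading probes return data and keep a shared copy, and
+// blocks with in-flight writebacks respond from the TBE.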
+ transition({M, O}, PrbInvData, I){TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ i_invL2;
+ pp_popProbeQueue;
+ }
+
+ transition(I, PrbInvData){TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({E, S}, PrbInvData, I){TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ i_invL2;
+ pp_popProbeQueue;
+ }
+
+ transition({M, O, E, S, I}, PrbInv, I){TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ i_invL2; // nothing will happen in I
+ pp_popProbeQueue;
+ }
+
+ transition({M, O}, PrbShrData, O){TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({E, S}, PrbShrData, S){TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition(I, PrbShrData){TagArrayRead} {
+ pm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInvData, I_C) {
+ pdt_sendProbeResponseDataFromTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbInvData, I_C) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({ES_I,MO_I}, PrbInv, I_C) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({ES_I, MO_I}, PrbShrData) {
+ pdt_sendProbeResponseDataFromTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, {PrbInvData, PrbInv}) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbShrData) {
+ pm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(MOD_I, WBAck, D_I) {
+ pn_popTDResponseQueue;
+ }
+
+ transition(MO_I, WBAck, I){TagArrayWrite} {
+ dt_deallocateTBE;
+ pn_popTDResponseQueue;
+ }
+
+ // this can only be a spurious CPUData from a shared block.
+ transition(MO_I, CPUData) {
+ pr_popResponseQueue;
+ }
+
+ transition(ES_I, WBAck, I){TagArrayWrite} {
+ dt_deallocateTBE;
+ pn_popTDResponseQueue;
+ }
+
+ transition(I_C, {WBAck}, I){TagArrayWrite} {
+ dt_deallocateTBE;
+ pn_popTDResponseQueue;
+ }
+
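+// StaleWB: the data in a racing writeback is no longer useful, so send the
+// appropriate unblock, drop the data, and clean up the TBE.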
+ transition({I_M, I_O, I_E, I_S}, StaleWB, I){TagArrayWrite} {
+ un_sendUnblockNotValid;
+ dt_deallocateTBE;
+ i_invL2;
+ pr_popResponseQueue;
+ }
+
+ transition({S_S, S_O, S_M, S_E}, StaleWB, S){TagArrayWrite} {
+ us_sendUnblockSharer;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition({E_M, E_O, E_E, E_S}, StaleWB, E){TagArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition({O_M, O_O, O_E}, StaleWB, O){TagArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition({M_M, M_O, M_E, M_S}, StaleWB, M){TagArrayWrite} {
+ uo_sendUnblockOwner;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(D_I, StaleWB, I){TagArrayWrite} {
+ un_sendUnblockNotValid;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(MOD_I, StaleWB, MO_I) {
+ un_sendUnblockNotValid;
+ rf_resetFrom;
+ pr_popResponseQueue;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Mithuna Thottethodi
+ */
+
+machine(MachineType:TCCdir, "AMD read-for-ownership directory for TCC (aka GPU L2)")
+: CacheMemory * directory;
+ // Convention: wire buffers are prefixed with "w_" for clarity
+ WireBuffer * w_reqToTCCDir;
+ WireBuffer * w_respToTCCDir;
+ WireBuffer * w_TCCUnblockToTCCDir;
+ WireBuffer * w_reqToTCC;
+ WireBuffer * w_probeToTCC;
+ WireBuffer * w_respToTCC;
+ int TCC_select_num_bits;
+ Cycles response_latency := 5;
+ Cycles directory_latency := 6;
+ Cycles issue_latency := 120;
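+ // issue_latency covers messages sent down to the NB directory;
+ // response_latency covers probes and responses toward the cores.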
+
+ // From the TCPs or SQCs
+ MessageBuffer * requestFromTCP, network="From", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseFromTCP, network="From", virtual_network="3", vnet_type="response";
+ MessageBuffer * unblockFromTCP, network="From", virtual_network="5", vnet_type="unblock";
+
+ // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
+ MessageBuffer * probeToCore, network="To", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseToCore, network="To", virtual_network="3", vnet_type="response";
+
+ // From the NB
+ MessageBuffer * probeFromNB, network="From", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseFromNB, network="From", virtual_network="2", vnet_type="response";
+ // To the NB
+ MessageBuffer * requestToNB, network="To", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseToNB, network="To", virtual_network="2", vnet_type="response";
+ MessageBuffer * unblockToNB, network="To", virtual_network="4", vnet_type="unblock";
+
+ MessageBuffer * triggerQueue, random="false";
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="TCCdir_State_I") {
+ // Base states
+ I, AccessPermission:Invalid, desc="Invalid";
+ S, AccessPermission:Invalid, desc="Shared";
+ E, AccessPermission:Invalid, desc="Exclusive";
+ O, AccessPermission:Invalid, desc="Owner";
+ M, AccessPermission:Invalid, desc="Modified";
+
+ CP_I, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to invalid";
+ B_I, AccessPermission:Invalid, desc="Blocked, need not send data after acks are in, going to invalid";
+ CP_O, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to owned";
+ CP_S, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to shared";
+ CP_OM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to O_M";
+ CP_SM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to S_M";
+ CP_ISM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to I_M";
+ CP_IOM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to I_M";
+ CP_OSIW, AccessPermission:Invalid, desc="Blocked, must send data after acks+CancelWB are in, going to I_C";
+
+
+ // Transient states and busy states used for handling side (TCC-facing) interactions
+ BW_S, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
+ BW_E, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
+ BW_O, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
+ BW_M, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
+
+ // Transient states and busy states used for handling upward (TCP-facing) interactions
+ I_M, AccessPermission:Invalid, desc="Invalid, issued RdBlkM, have not seen response yet";
+ I_ES, AccessPermission:Invalid, desc="Invalid, issued RdBlk, have not seen response yet";
+ I_S, AccessPermission:Invalid, desc="Invalid, issued RdBlkS, have not seen response yet";
+ BBS_S, AccessPermission:Invalid, desc="Blocked, going from S to S";
+ BBO_O, AccessPermission:Invalid, desc="Blocked, going from O to O";
+ BBM_M, AccessPermission:Invalid, desc="Blocked, going from M to M, waiting for data to forward";
+ BBM_O, AccessPermission:Invalid, desc="Blocked, going from M to O, waiting for data to forward";
+ BB_M, AccessPermission:Invalid, desc="Blocked, going from M to M, waiting for unblock";
+ BB_O, AccessPermission:Invalid, desc="Blocked, going from M to O, waiting for unblock";
+ BB_OO, AccessPermission:Invalid, desc="Blocked, going from O to O (adding sharers), waiting for unblock";
+ BB_S, AccessPermission:Invalid, desc="Blocked, going to S, waiting for (possible multiple) unblock(s)";
+ BBS_M, AccessPermission:Invalid, desc="Blocked, going from S to M";
+ BBO_M, AccessPermission:Invalid, desc="Blocked, going from O to M";
+ BBS_UM, AccessPermission:Invalid, desc="Blocked, going from S to M via upgrade";
+ BBO_UM, AccessPermission:Invalid, desc="Blocked, going from O to M via upgrade";
+ S_M, AccessPermission:Invalid, desc="Shared, issued CtoD, have not seen response yet";
+ O_M, AccessPermission:Invalid, desc="Owned, issued CtoD, have not seen response yet";
+
+ // Blocked states waiting for the core unblock after the NB response arrives
+ BBB_S, AccessPermission:Invalid, desc="Blocked, going to S after core unblock";
+ BBB_M, AccessPermission:Invalid, desc="Blocked, going to M after core unblock";
+ BBB_E, AccessPermission:Invalid, desc="Blocked, going to E after core unblock";
+
+ VES_I, AccessPermission:Invalid, desc="TCC replacement, waiting for clean WB ack";
+ VM_I, AccessPermission:Invalid, desc="TCC replacement, waiting for dirty WB ack";
+ VO_I, AccessPermission:Invalid, desc="TCC replacement, waiting for dirty WB ack";
+ VO_S, AccessPermission:Invalid, desc="TCC owner replacement, waiting for dirty WB ack";
+
+ ES_I, AccessPermission:Invalid, desc="L1 replacement, waiting for clean WB ack";
+ MO_I, AccessPermission:Invalid, desc="L1 replacement, waiting for dirty WB ack";
+
+ I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from NB for canceled WB";
+ I_W, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from NB; canceled WB raced with directory invalidation";
+
+ // Recall States
+ BRWD_I, AccessPermission:Invalid, desc="Recalling, waiting for WBAck and Probe Data responses";
+ BRW_I, AccessPermission:Read_Write, desc="Recalling, waiting for WBAck";
+ BRD_I, AccessPermission:Invalid, desc="Recalling, waiting for Probe Data responses";
+
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+ TagArrayRead, desc="Read the tag array";
+ TagArrayWrite, desc="Write the tag array";
+ }
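+ // These request types are reported to recordRequestType and checked in
+ // checkResourceAvailable below to account tag/data array accesses.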
+
+
+
+ // EVENTS
+ enumeration(Event, desc="TCC Directory Events") {
+ // Upward-facing events (TCCdir behaves toward the TCP/SQC and TCC the way
+ // the NB directory behaves toward the TCP/SQC and L3)
+
+ // Directory Recall
+ Recall, desc="directory cache is full";
+ // CPU requests
+ CPUWrite, desc="Initial req from core, sent to TCC";
+ NoCPUWrite, desc="Initial req from core, but non-exclusive clean data; can be discarded";
+ CPUWriteCancel, desc="WrCancel from core, sent to TCC";
+
+ // Requests from the TCPs
+ RdBlk, desc="RdBlk event";
+ RdBlkM, desc="RdBlkM event";
+ RdBlkS, desc="RdBlkS event";
+ CtoD, desc="Change to Dirty request";
+
+ // TCC writebacks
+ VicDirty, desc="...";
+ VicDirtyLast, desc="...";
+ VicClean, desc="...";
+ NoVic, desc="...";
+ StaleVic, desc="...";
+ CancelWB, desc="TCC got invalidating probe, canceled WB";
+
+ // Probe Responses from TCP/SQCs
+ CPUPrbResp, desc="Probe response from TCP/SQC";
+ TCCPrbResp, desc="Probe response from TCC";
+
+ ProbeAcksComplete, desc="All acks received";
+ ProbeAcksCompleteReissue, desc="All acks received, changing CtoD to reissue";
+
+ CoreUnblock, desc="unblock from TCP/SQC";
+ LastCoreUnblock, desc="Last unblock from TCP/SQC";
+ TCCUnblock, desc="unblock from TCC (current owner)";
+ TCCUnblock_Sharer, desc="unblock from TCC (a sharer, not owner)";
+ TCCUnblock_NotValid, desc="unblock from TCC (not valid...caused by stale writebacks)";
+
+ // Downward facing events
+
+ // NB initiated
+ NB_AckS, desc="NB Ack to TCC Request";
+ NB_AckE, desc="NB Ack to TCC Request";
+ NB_AckM, desc="NB Ack to TCC Request";
+ NB_AckCtoD, desc="NB Ack to TCC Request";
+ NB_AckWB, desc="NB Ack for clean WB";
+
+
+ // Incoming Probes from NB
+ PrbInvData, desc="Invalidating probe, return dirty data";
+ PrbInv, desc="Invalidating probe, no need to return data";
+ PrbShrData, desc="Downgrading probe, return data";
+ }
+
+
+ // TYPES
+
+ // Entry for directory
+ structure(Entry, desc="...", interface='AbstractCacheEntry') {
+ State CacheState, desc="Cache state (Cache of directory entries)";
+ DataBlock DataBlk, desc="data for the block";
+ NetDest Sharers, desc="Sharers for this block";
+ NetDest Owner, desc="Owner of this block";
+ NetDest MergedSharers, desc="Read sharers who are merged on a request";
+ int WaitingUnblocks, desc="Number of acks we're waiting for";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="DataBlk";
+ bool Dirty, desc="Is the data dirty?";
+ MachineID Requestor, desc="requestor";
+ int NumPendingAcks, desc="num acks expected";
+ MachineID OriginalRequestor, desc="Original Requestor";
+ MachineID UntransferredOwner, desc="Untransferred owner for an upgrade transaction";
+ bool UntransferredOwnerExists, desc="True if an untransferred owner exists for an upgrade transaction";
+ bool Cached, desc="data hit in Cache";
+ bool Shared, desc="victim hit by shared probe";
+ bool Upgrade, desc="An upgrade request in progress";
+ bool CtoD, desc="Saved sysack info";
+ CoherenceState CohState, desc="Saved sysack info";
+ MessageSizeType MessageSize, desc="Saved sysack info";
+ MachineID Sender, desc="sender";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ // ** OBJECTS **
+ TBETable TBEs, template="<TCCdir_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
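+ // Scratch NetDests reused by the probe fan-out actions below.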
+ NetDest TCC_dir_subtree;
+ NetDest temp;
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ bool presentOrAvail(Addr addr) {
+ return directory.isTagPresent(addr) || directory.cacheAvail(addr);
+ }
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", directory.lookup(addr));
+ }
+
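+ // Block data is tracked here only while a transaction is in flight, so a
+ // data lookup is expected to hit in a TBE; the assert below flags any
+ // lookup that falls through to the directory entry.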
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ assert(false);
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(TCCdir_State_to_permission(state));
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return TCCdir_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return TCCdir_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+
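+ // Sanity-check the owner/sharer invariants implied by each stable state:
+ // S: no owner; O: exactly one owner, not also a sharer;
+ // M: one owner, no sharers; E: no owner, exactly one (exclusive) sharer.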
+ if (state == State:S) {
+ assert(cache_entry.Owner.count() == 0);
+ }
+
+ if (state == State:O) {
+ assert(cache_entry.Owner.count() == 1);
+ assert(cache_entry.Sharers.isSuperset(cache_entry.Owner) == false);
+ }
+
+ if (state == State:M) {
+ assert(cache_entry.Owner.count() == 1);
+ assert(cache_entry.Sharers.count() == 0);
+ }
+
+ if (state == State:E) {
+ assert(cache_entry.Owner.count() == 0);
+ assert(cache_entry.Sharers.count() == 1);
+ }
+ }
+ }
+
+
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ directory.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ directory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ directory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ directory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return directory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return directory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return directory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return directory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ // Three classes of ports
+ // Class 1: downward facing network links to NB
+ out_port(requestToNB_out, CPURequestMsg, requestToNB);
+ out_port(responseToNB_out, ResponseMsg, responseToNB);
+ out_port(unblockToNB_out, UnblockMsg, unblockToNB);
+
+
+ // Class 2: upward facing ports to GPU cores
+ out_port(probeToCore_out, TDProbeRequestMsg, probeToCore);
+ out_port(responseToCore_out, ResponseMsg, responseToCore);
+
+ // Class 3: sideward facing ports (on "wirebuffer" links) to TCC
+ out_port(w_requestTCC_out, CPURequestMsg, w_reqToTCC);
+ out_port(w_probeTCC_out, NBProbeRequestMsg, w_probeToTCC);
+ out_port(w_respTCC_out, ResponseMsg, w_respToTCC);
+
+
+ // local trigger port
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ //
+ // request queue going to NB
+ //
+
+ // ** IN_PORTS **
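+ // Higher-ranked in-ports are polled first, so completions (triggers,
+ // unblocks, responses) drain ahead of probes and new core requests.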
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=8) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ assert(is_valid(tbe));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if ((in_msg.Type == TriggerType:AcksComplete) && (tbe.Upgrade == false)) {
+ trigger(Event:ProbeAcksComplete, in_msg.addr, cache_entry, tbe);
+ } else if ((in_msg.Type == TriggerType:AcksComplete) && (tbe.Upgrade == true)) {
+ trigger(Event:ProbeAcksCompleteReissue, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ // Unblock Networks (TCCdir can receive unblocks from TCC, TCPs)
+ // Port on first (of three) wire buffers from TCC
+ in_port(w_TCCUnblock_in, UnblockMsg, w_TCCUnblockToTCCDir, rank=7) {
+ if (w_TCCUnblock_in.isReady(clockEdge())) {
+ peek(w_TCCUnblock_in, UnblockMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.currentOwner) {
+ trigger(Event:TCCUnblock, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.valid) {
+ trigger(Event:TCCUnblock_Sharer, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:TCCUnblock_NotValid, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(unblockNetwork_in, UnblockMsg, unblockFromTCP, rank=6) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, UnblockMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (cache_entry.WaitingUnblocks == 1) {
+ trigger(Event:LastCoreUnblock, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:CoreUnblock, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+
+ // Responses from TCC and cores
+ // Port on second (of three) wire buffers from TCC
+ in_port(w_TCCResponse_in, ResponseMsg, w_respToTCCDir, rank=5) {
+ if (w_TCCResponse_in.isReady(clockEdge())) {
+ peek(w_TCCResponse_in, ResponseMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
+ trigger(Event:TCCPrbResp, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseFromTCP, rank=4) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
+ trigger(Event:CPUPrbResp, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+
+ // Port on third (of three) wire buffers from TCC
+ in_port(w_TCCRequest_in, CPURequestMsg, w_reqToTCCDir, rank=3) {
+ if(w_TCCRequest_in.isReady(clockEdge())) {
+ peek(w_TCCRequest_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:WrCancel) {
+ trigger(Event:CancelWB, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (is_valid(cache_entry) && cache_entry.Owner.isElement(in_msg.Requestor)) {
+ // if modified, or owner with no other sharers
+ if ((cache_entry.CacheState == State:M) || (cache_entry.Sharers.count() == 0)) {
+ assert(cache_entry.Owner.count()==1);
+ trigger(Event:VicDirtyLast, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:VicDirty, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ trigger(Event:StaleVic, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (is_valid(cache_entry) && cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ if (cache_entry.Sharers.count() == 1) {
+ // Last copy, victimize to L3
+ trigger(Event:VicClean, in_msg.addr, cache_entry, tbe);
+ } else {
+ // Not the last copy, so there is no need to victimize;
+ // just remove the sharer from the sharer list.
+ assert(cache_entry.Sharers.count() > 1);
+ trigger(Event:NoVic, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ trigger(Event:StaleVic, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ in_port(responseFromNB_in, ResponseMsg, responseFromNB, rank=2) {
+ if (responseFromNB_in.isReady(clockEdge())) {
+ peek(responseFromNB_in, ResponseMsg, block_on="addr") {
+
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:NBSysResp) {
+ if (in_msg.State == CoherenceState:Modified) {
+ if (in_msg.CtoD) {
+ trigger(Event:NB_AckCtoD, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.State == CoherenceState:Shared) {
+ trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.State == CoherenceState:Exclusive) {
+ trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ // Finally handling incoming requests (from TCP) and probes (from NB).
+
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB, rank=1) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ DPRINTF(RubySlicc, "machineID: %s\n", machineID);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ assert(in_msg.ReturnData);
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+
+ in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
+ if (coreRequestNetwork_in.isReady(clockEdge())) {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (presentOrAvail(in_msg.addr)) {
+ if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (is_valid(cache_entry) && cache_entry.Owner.isElement(in_msg.Requestor)) {
+ trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
+ } else if (is_valid(cache_entry) && (cache_entry.Sharers.count() + cache_entry.Owner.count()) > 1) {
+ trigger(Event:NoCPUWrite, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WrCancel) {
+ trigger(Event:CPUWriteCancel, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ // All requests require a directory entry
+ Addr victim := directory.cacheProbe(in_msg.addr);
+ trigger(Event:Recall, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ }
+ }
+
+
+
+
+ // Actions
+
+ // Downward-facing actions
+
+ action(c_clearOwner, "c", desc="Clear the owner field") {
+ cache_entry.Owner.clear();
+ }
+
+ action(rS_removeRequesterFromSharers, "rS", desc="Remove unblocker from sharer list") {
+ peek(unblockNetwork_in, UnblockMsg) {
+ cache_entry.Sharers.remove(in_msg.Sender);
+ }
+ }
+
+ action(rT_removeTCCFromSharers, "rT", desc="Remove TCC from sharer list") {
+ peek(w_TCCRequest_in, CPURequestMsg) {
+ cache_entry.Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ action(rO_removeOriginalRequestorFromSharers, "rO", desc="Remove replacing core from sharer list") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ cache_entry.Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ action(rC_removeCoreFromSharers, "rC", desc="Remove replacing core from sharer list") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ cache_entry.Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ action(rCo_removeCoreFromOwner, "rCo", desc="Remove replacing core from owner field") {
+ // Note that under some cases this action will try to remove a stale owner
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ cache_entry.Owner.remove(in_msg.Requestor);
+ }
+ }
+
+ action(rR_removeResponderFromSharers, "rR", desc="Remove responder from sharer list") {
+ peek(responseNetwork_in, ResponseMsg) {
+ cache_entry.Sharers.remove(in_msg.Sender);
+ }
+ }
+
+ action(nC_sendNullWBAckToCore, "nC", desc = "send a null WB Ack to release core") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(responseToCore_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBNack;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ action(nT_sendNullWBAckToTCC, "nT", desc = "send a null WB Ack to release TCC") {
+ peek(w_TCCRequest_in, CPURequestMsg) {
+ enqueue(w_respTCC_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ action(eto_moveExSharerToOwner, "eto", desc="move the current exclusive sharer to owner") {
+ assert(cache_entry.Sharers.count() == 1);
+ assert(cache_entry.Owner.count() == 0);
+ cache_entry.Owner := cache_entry.Sharers;
+ cache_entry.Sharers.clear();
+ APPEND_TRANSITION_COMMENT(" new owner ");
+ APPEND_TRANSITION_COMMENT(cache_entry.Owner);
+ }
+
+ action(aT_addTCCToSharers, "aT", desc="Add TCC to sharer list") {
+ peek(w_TCCUnblock_in, UnblockMsg) {
+ cache_entry.Sharers.add(in_msg.Sender);
+ }
+ }
+
+ action(as_addToSharers, "as", desc="Add unblocker to sharer list") {
+ peek(unblockNetwork_in, UnblockMsg) {
+ cache_entry.Sharers.add(in_msg.Sender);
+ }
+ }
+
+ action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
+ cache_entry.Sharers.addNetDest(cache_entry.Owner);
+ cache_entry.Owner.clear();
+ }
+
+ action(cc_clearSharers, "\c", desc="Clear the sharers field") {
+ cache_entry.Sharers.clear();
+ }
+
+ action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
+ peek(unblockNetwork_in, UnblockMsg) {
+ cache_entry.Owner.clear();
+ cache_entry.Owner.add(in_msg.Sender);
+ APPEND_TRANSITION_COMMENT(" tcp_ub owner ");
+ APPEND_TRANSITION_COMMENT(cache_entry.Owner);
+ }
+ }
+
+ action(eT_ownerIsUnblocker, "eT", desc="TCC (unblocker) is now owner") {
+ peek(w_TCCUnblock_in, UnblockMsg) {
+ cache_entry.Owner.clear();
+ cache_entry.Owner.add(in_msg.Sender);
+ APPEND_TRANSITION_COMMENT(" tcc_ub owner ");
+ APPEND_TRANSITION_COMMENT(cache_entry.Owner);
+ }
+ }
+
+ action(ctr_copyTCCResponseToTBE, "ctr", desc="Copy TCC probe response data to TBE") {
+ peek(w_TCCResponse_in, ResponseMsg) {
+ // Overwrite data if tbe does not hold dirty data. Stop once it is dirty.
+ if(tbe.Dirty == false) {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ tbe.Sender := in_msg.Sender;
+ }
+ DPRINTF(RubySlicc, "%s\n", (tbe.DataBlk));
+ }
+ }
+
+ action(ccr_copyCoreResponseToTBE, "ccr", desc="Copy core probe response data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ // Overwrite data if tbe does not hold dirty data. Stop once it is dirty.
+ if(tbe.Dirty == false) {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+
+ if(tbe.Sender == machineID) {
+ tbe.Sender := in_msg.Sender;
+ }
+ }
+ DPRINTF(RubySlicc, "%s\n", (tbe.DataBlk));
+ }
+ }
+
+ action(cd_clearDirtyBitTBE, "cd", desc="Clear Dirty bit in TBE") {
+ tbe.Dirty := false;
+ }
+
+ action(n_issueRdBlk, "n-", desc="Issue RdBlk") {
+ enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
+ enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
+ enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkM;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(rU_rememberUpgrade, "rU", desc="Remember that this was an upgrade") {
+ tbe.Upgrade := true;
+ }
+
+ action(ruo_rememberUntransferredOwner, "ruo", desc="Remember the untransferred owner") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if(in_msg.UntransferredOwner == true) {
+ tbe.UntransferredOwner := in_msg.Sender;
+ tbe.UntransferredOwnerExists := true;
+ }
+ DPRINTF(RubySlicc, "%s\n", (in_msg));
+ }
+ }
+
+ action(ruoT_rememberUntransferredOwnerTCC, "ruoT", desc="Remember the untransferred owner") {
+ peek(w_TCCResponse_in, ResponseMsg) {
+ if(in_msg.UntransferredOwner == true) {
+ tbe.UntransferredOwner := in_msg.Sender;
+ tbe.UntransferredOwnerExists := true;
+ }
+ DPRINTF(RubySlicc, "%s\n", (in_msg));
+ }
+ }
+
+ action(vd_victim, "vd", desc="Victimize M/O Data") {
+ enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ if (cache_entry.CacheState == State:O) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ out_msg.Dirty := true;
+ }
+ }
+
+ action(vc_victim, "vc", desc="Victimize E/S Data") {
+ enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ if (cache_entry.CacheState == State:S) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ out_msg.Dirty := false;
+ }
+ }
+
+
+ action(sT_sendRequestToTCC, "sT", desc="send request to TCC") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(w_requestTCC_out, CPURequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ APPEND_TRANSITION_COMMENT(" requestor ");
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+
+ }
+ }
+
+
+ action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+
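+ // Build the probe set from sharers + owner, excluding the TCC, which is
+ // probed separately over its wire buffer (s2_probeShrL2Data).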
+ temp := cache_entry.Sharers;
+ temp.addNetDest(cache_entry.Owner);
+ if (temp.isElement(tcc)) {
+ temp.remove(tcc);
+ }
+ if (temp.count() > 0) {
+ enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination := temp;
+ tbe.NumPendingAcks := temp.count();
+ if(cache_entry.CacheState == State:M) {
+ assert(tbe.NumPendingAcks == 1);
+ }
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ }
+ }
+ }
+
+ action(ls2_probeShrL2Data, "ls2", desc="local probe downgrade L2, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
+ enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(tcc);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+
+ }
+ }
+ }
+
+ action(s2_probeShrL2Data, "s2", desc="probe shared L2, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
+ enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(tcc);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+
+ }
+ }
+ }
+
+ action(ldc_probeInvCoreData, "ldc", desc="local probe to inv cores, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ NetDest dest := cache_entry.Sharers;
+ dest.addNetDest(cache_entry.Owner);
+ if (dest.isElement(tcc)) {
+ dest.remove(tcc);
+ }
+ dest.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := dest.count();
+ if (dest.count()>0){
+ enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+
+ out_msg.Destination.addNetDest(dest);
+ if(cache_entry.CacheState == State:M) {
+ assert(tbe.NumPendingAcks == 1);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ }
+ }
+ }
+ }
+
+ action(ld2_probeInvL2Data, "ld2", desc="local probe inv L2, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
+ enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(tcc);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+
+ }
+ }
+ }
+
+ action(dc_probeInvCoreData, "dc", desc="probe inv cores + TCC, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+
+ out_msg.Destination.addNetDest(cache_entry.Sharers);
+ out_msg.Destination.addNetDest(cache_entry.Owner);
+ tbe.NumPendingAcks := cache_entry.Sharers.count() + cache_entry.Owner.count();
+ if(cache_entry.CacheState == State:M) {
+ assert(tbe.NumPendingAcks == 1);
+ }
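+ // The TCC is probed separately over its wire buffer (d2_probeInvL2Data),
+ // so remove it from this fan-out and from the ack count.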
+ if (out_msg.Destination.isElement(tcc)) {
+ out_msg.Destination.remove(tcc);
+ tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
+ }
+
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ }
+ }
+
+ action(d2_probeInvL2Data, "d2", desc="probe inv L2, return data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
+ enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(tcc);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+
+ }
+ }
+ }
+
+ action(lpc_probeInvCore, "lpc", desc="local probe inv cores, no data") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ TCC_dir_subtree.broadcast(MachineType:TCP);
+ TCC_dir_subtree.broadcast(MachineType:SQC);
+
+ temp := cache_entry.Sharers;
+ temp := temp.OR(cache_entry.Owner);
+ TCC_dir_subtree := TCC_dir_subtree.AND(temp);
+ tbe.NumPendingAcks := TCC_dir_subtree.count();
+ if(cache_entry.CacheState == State:M) {
+ assert(tbe.NumPendingAcks == 1);
+ }
+ if(TCC_dir_subtree.isElement(in_msg.Requestor)) {
+ TCC_dir_subtree.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
+ }
+
+ if(TCC_dir_subtree.count() > 0) {
+ enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := false;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.localCtoD := true;
+
+ out_msg.Destination.addNetDest(TCC_dir_subtree);
+
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ }
+ }
+ }
+ }
+
+ action(ipc_probeInvCore, "ipc", desc="probe inv cores, no data") {
+ TCC_dir_subtree.broadcast(MachineType:TCP);
+ TCC_dir_subtree.broadcast(MachineType:SQC);
+
+ temp := cache_entry.Sharers;
+ temp := temp.OR(cache_entry.Owner);
+ TCC_dir_subtree := TCC_dir_subtree.AND(temp);
+ tbe.NumPendingAcks := TCC_dir_subtree.count();
+ if(TCC_dir_subtree.count() > 0) {
+
+ enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := false;
+ out_msg.MessageSize := MessageSizeType:Control;
+
+ out_msg.Destination.addNetDest(TCC_dir_subtree);
+ if(cache_entry.CacheState == State:M) {
+ assert(tbe.NumPendingAcks == 1);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ }
+ }
+ }
+
+ action(i2_probeInvL2, "i2", desc="probe inv L2, no data") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
+ enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := false;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(tcc);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Ntsl := true;
+ out_msg.Hit := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false; // only true when data is sent back
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry) || is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := getDataBlock(address);
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ }
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+
+ action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := getDataBlock(address);
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ }
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(mc_cancelWB, "mc", desc="send writeback cancel to NB directory") {
+ enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WrCancel;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(sCS_sendCollectiveResponseS, "sCS", desc="send shared response to all merged TCP/SQC") {
+ enqueue(responseToCore_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := tbe.Sender;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.CtoD := false;
+ out_msg.State := CoherenceState:Shared;
+ out_msg.Destination.addNetDest(cache_entry.MergedSharers);
+ out_msg.Shared := tbe.Shared;
+ out_msg.Dirty := tbe.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(sS_sendResponseS, "sS", desc="send shared response to TCP/SQC") {
+ enqueue(responseToCore_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := tbe.Sender;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.CtoD := false;
+ out_msg.State := CoherenceState:Shared;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.Shared := tbe.Shared;
+ out_msg.Dirty := tbe.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(sM_sendResponseM, "sM", desc="send response to TCP/SQC") {
+ enqueue(responseToCore_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := tbe.Sender;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.CtoD := false;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.Shared := tbe.Shared;
+ out_msg.Dirty := tbe.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+
+
+ action(fw2_forwardWBAck, "fw2", desc="forward WBAck to TCC") {
+ peek(responseFromNB_in, ResponseMsg) {
+ if(tbe.OriginalRequestor != machineID) {
+ enqueue(w_respTCC_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Sender := machineID;
+ //out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+ }
+
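+ // When a SysAck cannot be forwarded immediately (e.g. probes are still
+ // outstanding), sa_saveSysAck stashes it in the TBE and
+ // fsa_forwardSavedAck replays it later; fa_forwardSysAck is the
+ // immediate-forwarding path.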
+ action(sa_saveSysAck, "sa", desc="Save SysAck") {
+ peek(responseFromNB_in, ResponseMsg) {
+ tbe.Dirty := in_msg.Dirty;
+ if (tbe.Dirty == false) {
+ tbe.DataBlk := in_msg.DataBlk;
+ } // else the TBE already holds the dirty data; keep it
+ tbe.CtoD := in_msg.CtoD;
+ tbe.CohState := in_msg.State;
+ tbe.Shared := in_msg.Shared;
+ tbe.MessageSize := in_msg.MessageSize;
+ }
+ }
+
+ action(fsa_forwardSavedAck, "fsa", desc="forward saved SysAck to TCP or SQC") {
+ enqueue(responseToCore_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.DataBlk := tbe.DataBlk; // saved data, clean or dirty
+ out_msg.CtoD := tbe.CtoD;
+ out_msg.State := tbe.CohState;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.Shared := tbe.Shared;
+ out_msg.MessageSize := tbe.MessageSize;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := tbe.Sender;
+ }
+ }
+
+ action(fa_forwardSysAck, "fa", desc="forward SysAck to TCP or SQC") {
+ peek(responseFromNB_in, ResponseMsg) {
+ enqueue(responseToCore_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ if (tbe.Dirty == false) {
+ out_msg.DataBlk := in_msg.DataBlk;
+ tbe.Sender := machineID;
+ }
+ else {
+ out_msg.DataBlk := tbe.DataBlk;
+ }
+ out_msg.CtoD := in_msg.CtoD;
+ out_msg.State := in_msg.State;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.Sender := tbe.Sender;
+ DPRINTF(RubySlicc, "%s\n", (out_msg.DataBlk));
+ }
+ }
+ }
+
+ action(pso_probeSharedDataOwner, "pso", desc="probe shared data at owner") {
+ MachineID tcc := mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ if (cache_entry.Owner.isElement(tcc)) {
+ enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(tcc);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ else { // i.e., owner is a core
+ enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.addNetDest(cache_entry.Owner);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
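+ // Exactly one probe was sent above, to either the TCC or the owning core.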
+ tbe.NumPendingAcks := 1;
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ coreRequestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pk_popResponseQueue, "pk", desc="Pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="Pop incoming probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pR_popResponseFromNBQueue, "pR", desc="Pop incoming Response queue From NB") {
+ responseFromNB_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pl_popTCCRequestQueue, "pl", desc="pop TCC request queue") {
+ w_TCCRequest_in.dequeue(clockEdge());
+ }
+
+ action(plr_popTCCResponseQueue, "plr", desc="pop TCC response queue") {
+ w_TCCResponse_in.dequeue(clockEdge());
+ }
+
+ action(plu_popTCCUnblockQueue, "plu", desc="pop TCC unblock queue") {
+ w_TCCUnblock_in.dequeue(clockEdge());
+ }
+
+
+ action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
+ peek(unblockNetwork_in, UnblockMsg) {
+ cache_entry.Sharers.add(in_msg.Sender);
+ cache_entry.MergedSharers.remove(in_msg.Sender);
+ assert(cache_entry.WaitingUnblocks >= 0);
+ cache_entry.WaitingUnblocks := cache_entry.WaitingUnblocks - 1;
+ }
+ }
+
+ action(q_addOutstandingMergedSharer, "q", desc="Increment outstanding requests") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ cache_entry.MergedSharers.add(in_msg.Requestor);
+ cache_entry.WaitingUnblocks := cache_entry.WaitingUnblocks + 1;
+ }
+ }
+
+ action(uu_sendUnblock, "uu", desc="state changed, unblock") {
+ enqueue(unblockToNB_out, UnblockMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+ coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(yy_recycleTCCRequestQueue, "yy", desc="recycle TCC request queue") {
+ w_TCCRequest_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(xz_recycleResponseQueue, "xz", desc="recycle response queue") {
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(xx_recycleTCCResponseQueue, "xx", desc="recycle TCC response queue") {
+ w_TCCResponse_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(vv_recycleTCCUnblockQueue, "vv", desc="Recycle the TCC unblock queue") {
+ w_TCCUnblock_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ // NOTE: like vv_ above, this also recycles the TCC unblock wire buffer.
+ action(xy_recycleUnblockQueue, "xy", desc="Recycle the TCC unblock queue") {
+ w_TCCUnblock_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(ww_recycleProbeRequest, "ww", desc="Recycle the probe request queue") {
+ probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(x_decrementAcks, "x", desc="decrement Acks pending") {
+ tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
+ }
+
+ action(o_checkForAckCompletion, "o", desc="check for ack completion") {
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ APPEND_TRANSITION_COMMENT(" tbe acks ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
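+ // Four TBE allocators, one per request source: tp_ (NB probes),
+ // tv_ (TCC writebacks), t_ (core requests), tr_ (self-initiated recalls).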
+ action(tp_allocateTBE, "tp", desc="allocate TBE Entry for upward transactions") {
+ check_allocate(TBEs);
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.Dirty := false;
+ tbe.NumPendingAcks := 0;
+ tbe.UntransferredOwnerExists := false;
+ }
+ }
+
+ action(tv_allocateTBE, "tv", desc="allocate TBE Entry for TCC transactions") {
+ check_allocate(TBEs);
+ peek(w_TCCRequest_in, CPURequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := in_msg.DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ tbe.OriginalRequestor := in_msg.Requestor;
+ tbe.NumPendingAcks := 0;
+ tbe.UntransferredOwnerExists := false;
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs); // check whether resources are full
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ tbe.Upgrade := false;
+ tbe.OriginalRequestor := in_msg.Requestor;
+ tbe.NumPendingAcks := 0;
+ tbe.UntransferredOwnerExists := false;
+ tbe.Sender := machineID;
+ }
+ }
+
+ action(tr_allocateTBE, "tr", desc="allocate TBE Entry for recall") {
+ check_allocate(TBEs); // check whether resources are full
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ tbe.Upgrade := false;
+ tbe.OriginalRequestor := machineID; //Recall request, Self initiated
+ tbe.NumPendingAcks := 0;
+ tbe.UntransferredOwnerExists := false;
+ }
+
+ action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+
+ action(d_allocateDir, "d", desc="allocate Directory Cache") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(directory.allocate(address, new Entry));
+ }
+ }
+
+ action(dd_deallocateDir, "dd", desc="deallocate Directory Cache") {
+ if (is_valid(cache_entry)) {
+ directory.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:StaleNotif;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(wb_data, "wb", desc="write back data") {
+ enqueue(responseToNB_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
+ assert(is_valid(tbe));
+ tbe.Shared := true;
+ }
+
+ action(y_writeDataToTBE, "y", desc="write Probe Data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (!tbe.Dirty || in_msg.Dirty) {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ }
+ if (in_msg.Hit) {
+ tbe.Cached := true;
+ }
+ }
+ }
+
+ action(ty_writeTCCDataToTBE, "ty", desc="write TCC Probe Data to TBE") {
+ peek(w_TCCResponse_in, ResponseMsg) {
+ if (!tbe.Dirty || in_msg.Dirty) {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ }
+ if (in_msg.Hit) {
+ tbe.Cached := true;
+ }
+ }
+ }
+
+
+ action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
+ directory.setMRU(address);
+ }
+
+ // TRANSITIONS
+
+  // Handling TCP/SQC requests (similar to how the NB directory handles TCC
+  // events, with some changes to account for the stateful directory).
+
+
+ // transitions from base
+ transition(I, RdBlk, I_ES){TagArrayRead} {
+ d_allocateDir;
+ t_allocateTBE;
+ n_issueRdBlk;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, RdBlkS, I_S){TagArrayRead} {
+ d_allocateDir;
+ t_allocateTBE;
+ nS_issueRdBlkS;
+ i_popIncomingRequestQueue;
+ }
+
+
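+  // System acks from the NB are forwarded to the requesting core; the
+  // BBB_* states then wait for the core's unblock before settling into the
+  // final stable state.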
+ transition(I_S, NB_AckS, BBB_S) {
+ fa_forwardSysAck;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(I_ES, NB_AckS, BBB_S) {
+ fa_forwardSysAck;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(I_ES, NB_AckE, BBB_E) {
+ fa_forwardSysAck;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition({S_M, O_M}, {NB_AckCtoD,NB_AckM}, BBB_M) {
+ fa_forwardSysAck;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(I_M, NB_AckM, BBB_M) {
+ fa_forwardSysAck;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(BBB_M, CoreUnblock, M){TagArrayWrite} {
+ c_clearOwner;
+ cc_clearSharers;
+ e_ownerIsUnblocker;
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(BBB_S, CoreUnblock, S){TagArrayWrite} {
+ as_addToSharers;
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(BBB_E, CoreUnblock, E){TagArrayWrite} {
+ as_addToSharers;
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+
+ transition(I, RdBlkM, I_M){TagArrayRead} {
+ d_allocateDir;
+ t_allocateTBE;
+ nM_issueRdBlkM;
+ i_popIncomingRequestQueue;
+ }
+
+  // Read sharing: probe the current holders for data and merge concurrent
+  // readers into a single response window.
+ transition(S, {RdBlk, RdBlkS}, BBS_S){TagArrayRead} {
+ t_allocateTBE;
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ q_addOutstandingMergedSharer;
+ i_popIncomingRequestQueue;
+ }
+ // Merging of read sharing into a single request
+ transition(BBS_S, {RdBlk, RdBlkS}) {
+ q_addOutstandingMergedSharer;
+ i_popIncomingRequestQueue;
+ }
+ // Wait for probe acks to be complete
+ transition(BBS_S, CPUPrbResp) {
+ ccr_copyCoreResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(BBS_S, TCCPrbResp) {
+ ctr_copyTCCResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+  // The merging window closes with this transition;
+  // send responses to all outstanding requestors.
+ transition(BBS_S, ProbeAcksComplete, BB_S) {
+ sCS_sendCollectiveResponseS;
+ pt_popTriggerQueue;
+ }
+
+ transition(BB_S, CoreUnblock, BB_S) {
+ m_addUnlockerToSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(BB_S, LastCoreUnblock, S) {
+ m_addUnlockerToSharers;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(O, {RdBlk, RdBlkS}, BBO_O){TagArrayRead} {
+ t_allocateTBE;
+ pso_probeSharedDataOwner;
+ q_addOutstandingMergedSharer;
+ i_popIncomingRequestQueue;
+ }
+ // Merging of read sharing into a single request
+ transition(BBO_O, {RdBlk, RdBlkS}) {
+ q_addOutstandingMergedSharer;
+ i_popIncomingRequestQueue;
+ }
+
+ // Wait for probe acks to be complete
+ transition(BBO_O, CPUPrbResp) {
+ ccr_copyCoreResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(BBO_O, TCCPrbResp) {
+ ctr_copyTCCResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+  // The merging window closes with this transition;
+  // send responses to all outstanding requestors.
+ transition(BBO_O, ProbeAcksComplete, BB_OO) {
+ sCS_sendCollectiveResponseS;
+ pt_popTriggerQueue;
+ }
+
+ transition(BB_OO, CoreUnblock) {
+ m_addUnlockerToSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(BB_OO, LastCoreUnblock, O){TagArrayWrite} {
+ m_addUnlockerToSharers;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
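+  // Core writebacks: drop the core from the sharer/owner lists and forward
+  // the writeback to the TCC; the BW_* states wait for the TCC unblock.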
+ transition(S, CPUWrite, BW_S){TagArrayRead} {
+ t_allocateTBE;
+ rC_removeCoreFromSharers;
+ sT_sendRequestToTCC;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(E, CPUWrite, BW_E){TagArrayRead} {
+ t_allocateTBE;
+ rC_removeCoreFromSharers;
+ sT_sendRequestToTCC;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, CPUWrite, BW_O){TagArrayRead} {
+ t_allocateTBE;
+ rCo_removeCoreFromOwner;
+ rC_removeCoreFromSharers;
+ sT_sendRequestToTCC;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, CPUWrite, BW_M){TagArrayRead} {
+ t_allocateTBE;
+ rCo_removeCoreFromOwner;
+ rC_removeCoreFromSharers;
+ sT_sendRequestToTCC;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BW_S, TCCUnblock_Sharer, S){TagArrayWrite} {
+ aT_addTCCToSharers;
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_S, TCCUnblock_NotValid, S){TagArrayWrite} {
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_E, TCCUnblock, E){TagArrayWrite} {
+ cc_clearSharers;
+ aT_addTCCToSharers;
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_E, TCCUnblock_NotValid, E) {
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_M, TCCUnblock, M) {
+ c_clearOwner;
+ cc_clearSharers;
+ eT_ownerIsUnblocker;
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_M, TCCUnblock_NotValid, M) {
+    // Note: this transition should only execute if we received a stale writeback
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_O, TCCUnblock, O) {
+ c_clearOwner;
+ eT_ownerIsUnblocker;
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+ transition(BW_O, TCCUnblock_NotValid, O) {
+    // Note: this transition should only execute if we received a stale writeback
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
+  // We likely lost the owner due to an invalidation racing with an 'O' writeback
+ transition(BW_O, TCCUnblock_Sharer, S) {
+ c_clearOwner;
+ aT_addTCCToSharers;
+ dt_deallocateTBE;
+ plu_popTCCUnblockQueue;
+ }
+
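+  // Probes that race with an in-flight writeback are recycled until the
+  // writeback resolves.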
+ transition({BW_M, BW_S, BW_E, BW_O}, {PrbInv,PrbInvData,PrbShrData}) {
+ ww_recycleProbeRequest;
+ }
+
+ transition(BRWD_I, {PrbInvData, PrbInv, PrbShrData}) {
+ ww_recycleProbeRequest;
+ }
+
+ // Three step process: locally invalidate others, issue CtoD, wait for NB_AckCtoD
+ transition(S, CtoD, BBS_UM) {TagArrayRead} {
+ t_allocateTBE;
+ lpc_probeInvCore;
+ i2_probeInvL2;
+ o_checkForAckCompletion;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BBS_UM, CPUPrbResp, BBS_UM) {
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(BBS_UM, TCCPrbResp) {
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+ transition(BBS_UM, ProbeAcksComplete, S_M) {
+ rU_rememberUpgrade;
+ nM_issueRdBlkM;
+ pt_popTriggerQueue;
+ }
+
+ // Three step process: locally invalidate others, issue CtoD, wait for NB_AckCtoD
+ transition(O, CtoD, BBO_UM){TagArrayRead} {
+ t_allocateTBE;
+ lpc_probeInvCore;
+ i2_probeInvL2;
+ o_checkForAckCompletion;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BBO_UM, CPUPrbResp, BBO_UM) {
+ ruo_rememberUntransferredOwner;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(BBO_UM, TCCPrbResp) {
+ ruoT_rememberUntransferredOwnerTCC;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+ transition(BBO_UM, ProbeAcksComplete, O_M) {
+ rU_rememberUpgrade;
+ nM_issueRdBlkM;
+ pt_popTriggerQueue;
+ }
+
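+  // Exclusive requests against shared copies: invalidate all other holders
+  // first, then issue RdBlkM to the NB once all probe acks have arrived.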
+ transition({S,E}, RdBlkM, BBS_M){TagArrayWrite} {
+ t_allocateTBE;
+ ldc_probeInvCoreData;
+ ld2_probeInvL2Data;
+ o_checkForAckCompletion;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BBS_M, CPUPrbResp) {
+ ccr_copyCoreResponseToTBE;
+ rR_removeResponderFromSharers;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(BBS_M, TCCPrbResp) {
+ ctr_copyTCCResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+ transition(BBS_M, ProbeAcksComplete, S_M) {
+ nM_issueRdBlkM;
+ pt_popTriggerQueue;
+ }
+
+ transition(O, RdBlkM, BBO_M){TagArrayRead} {
+ t_allocateTBE;
+ ldc_probeInvCoreData;
+ ld2_probeInvL2Data;
+ o_checkForAckCompletion;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BBO_M, CPUPrbResp) {
+ ccr_copyCoreResponseToTBE;
+ rR_removeResponderFromSharers;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(BBO_M, TCCPrbResp) {
+ ctr_copyTCCResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+ transition(BBO_M, ProbeAcksComplete, O_M) {
+ nM_issueRdBlkM;
+ pt_popTriggerQueue;
+ }
+
+  // Exclusive request while Modified: probe the current owner for data
+  // before responding.
+ transition(M, RdBlkM, BBM_M){TagArrayRead} {
+ t_allocateTBE;
+ ldc_probeInvCoreData;
+ ld2_probeInvL2Data;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BBM_M, CPUPrbResp) {
+ ccr_copyCoreResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ // TCP recalled block before receiving probe
+ transition({BBM_M, BBS_M, BBO_M}, {CPUWrite,NoCPUWrite}) {
+ zz_recycleRequest;
+ }
+
+ transition(BBM_M, TCCPrbResp) {
+ ctr_copyTCCResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+ transition(BBM_M, ProbeAcksComplete, BB_M) {
+ sM_sendResponseM;
+ pt_popTriggerQueue;
+ }
+
+ transition(BB_M, CoreUnblock, M){TagArrayWrite} {
+ e_ownerIsUnblocker;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(M, {RdBlkS, RdBlk}, BBM_O){TagArrayRead} {
+ t_allocateTBE;
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(E, {RdBlkS, RdBlk}, BBM_O){TagArrayRead} {
+ t_allocateTBE;
+ eto_moveExSharerToOwner;
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(BBM_O, CPUPrbResp) {
+ ccr_copyCoreResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ transition(BBM_O, TCCPrbResp) {
+ ctr_copyTCCResponseToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ transition(BBM_O, ProbeAcksComplete, BB_O) {
+ sS_sendResponseS;
+ pt_popTriggerQueue;
+ }
+
+ transition(BB_O, CoreUnblock, O){TagArrayWrite} {
+ as_addToSharers;
+ dt_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({BBO_O, BBM_M, BBS_S, BBM_O, BB_M, BB_O, BB_S, BBO_UM, BBS_UM, BBS_M, BBO_M, BB_OO}, {PrbInvData, PrbInv,PrbShrData}) {
+ ww_recycleProbeRequest;
+ }
+
+ transition({BBM_O, BBS_S, CP_S, CP_O, CP_SM, CP_OM, BBO_O}, {CPUWrite,NoCPUWrite}) {
+ zz_recycleRequest;
+ }
+
+ // stale CtoD raced with external invalidation
+ transition({I, CP_I, B_I, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, CtoD) {
+ i_popIncomingRequestQueue;
+ }
+
+ // stale CtoD raced with internal RdBlkM
+ transition({BBM_M, BBS_M, BBO_M, BBB_M, BBS_UM, BBO_UM}, CtoD) {
+ i_popIncomingRequestQueue;
+ }
+
+ transition({E, M}, CtoD) {
+ i_popIncomingRequestQueue;
+ }
+
+
+  // The TCC directory has sent out (and potentially received acks for) probes.
+  // TCP/SQC replacements, known to be stale at this point, are popped off.
+ transition({BBO_UM, BBS_UM}, {CPUWrite,NoCPUWrite}) {
+ nC_sendNullWBAckToCore;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S_M, {NoCPUWrite, CPUWrite}) {
+ zz_recycleRequest;
+ }
+
+ transition(O_M, {NoCPUWrite, CPUWrite}) {
+ zz_recycleRequest;
+ }
+
+
+ transition({BBM_M, BBS_M, BBO_M, BBO_UM, BBS_UM}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+ transition({CP_S, CP_O, CP_OM, CP_SM}, {VicDirty, VicClean, VicDirtyLast, CancelWB, NoVic}) {
+ yy_recycleTCCRequestQueue;
+ }
+
+  // However, when TCCdir has sent out PrbShrData, these cannot be ignored; recycle instead.
+ transition({BBS_S, BBO_O, BBM_O, S_M, O_M, BBB_M, BBB_S, BBB_E}, {VicDirty, VicClean, VicDirtyLast,CancelWB}) {
+ yy_recycleTCCRequestQueue;
+ }
+
+ transition({BW_S,BW_E,BW_O, BW_M}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
+ yy_recycleTCCRequestQueue;
+ }
+
+ transition({BW_S,BW_E,BW_O, BW_M}, CancelWB) {
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+
+  // Recycle if waiting for unblocks.
+ transition({BB_M,BB_O,BB_S,BB_OO}, {VicDirty, VicClean, VicDirtyLast,NoVic,CancelWB}) {
+ yy_recycleTCCRequestQueue;
+ }
+
+ transition({BBS_S, BBO_O}, NoVic) {
+ rT_removeTCCFromSharers;
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+  // Stale: pop the message and send a dummy ack.
+ transition({I_S, I_ES, I_M}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+ transition(M, VicDirtyLast, VM_I){TagArrayRead} {
+ tv_allocateTBE;
+ vd_victim;
+ pl_popTCCRequestQueue;
+ }
+
+ transition(E, VicDirty, VM_I){TagArrayRead} {
+ tv_allocateTBE;
+ vd_victim;
+ pl_popTCCRequestQueue;
+ }
+
+ transition(O, VicDirty, VO_S){TagArrayRead} {
+ tv_allocateTBE;
+ vd_victim;
+ pl_popTCCRequestQueue;
+ }
+
+ transition(O, {VicDirtyLast, VicClean}, VO_I){TagArrayRead} {
+ tv_allocateTBE;
+ vd_victim;
+ pl_popTCCRequestQueue;
+ }
+
+ transition({E, S}, VicClean, VES_I){TagArrayRead} {
+ tv_allocateTBE;
+ vc_victim;
+ pl_popTCCRequestQueue;
+ }
+
+ transition({O, S}, NoVic){TagArrayRead} {
+ rT_removeTCCFromSharers;
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+ transition({O,S}, NoCPUWrite){TagArrayRead} {
+ rC_removeCoreFromSharers;
+ nC_sendNullWBAckToCore;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({M,E}, NoCPUWrite){TagArrayRead} {
+ rC_removeCoreFromSharers;
+ nC_sendNullWBAckToCore;
+ i_popIncomingRequestQueue;
+ }
+
+  // This can only happen in a race (TCCdir sent out the probes that caused this cancel in the first place).
+ transition({VM_I, VES_I, VO_I}, CancelWB) {
+ pl_popTCCRequestQueue;
+ }
+
+ transition({VM_I, VES_I, VO_I}, NB_AckWB, I){TagArrayWrite} {
+ c_clearOwner;
+ cc_clearSharers;
+ wb_data;
+ fw2_forwardWBAck;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(VO_S, NB_AckWB, S){TagArrayWrite} {
+ c_clearOwner;
+ wb_data;
+ fw2_forwardWBAck;
+ dt_deallocateTBE;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(I_C, NB_AckWB, I){TagArrayWrite} {
+ c_clearOwner;
+ cc_clearSharers;
+ ss_sendStaleNotification;
+ fw2_forwardWBAck;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(I_W, NB_AckWB, I) {
+ ss_sendStaleNotification;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pR_popResponseFromNBQueue;
+ }
+
+
+
+  // Do not handle replacements, reads of any kind, or writebacks from transients; recycle.
+ transition({I_M, I_ES, I_S, MO_I, ES_I, S_M, O_M, VES_I, VO_I, VO_S, VM_I, I_C, I_W}, {RdBlkS,RdBlkM,RdBlk,CtoD}) {
+ zz_recycleRequest;
+ }
+
+ transition( VO_S, NoCPUWrite) {
+ zz_recycleRequest;
+ }
+
+ transition({BW_M, BW_S, BW_O, BW_E}, {RdBlkS,RdBlkM,RdBlk,CtoD,NoCPUWrite, CPUWrite}) {
+ zz_recycleRequest;
+ }
+
+ transition({BBB_M, BBB_S, BBB_E, BB_O, BB_M, BB_S, BB_OO}, { RdBlk, RdBlkS, RdBlkM, CPUWrite, NoCPUWrite}) {
+ zz_recycleRequest;
+ }
+
+ transition({BBB_S, BBB_E, BB_O, BB_S, BB_OO}, { CtoD}) {
+ zz_recycleRequest;
+ }
+
+ transition({BBS_UM, BBO_UM, BBM_M, BBM_O, BBS_M, BBO_M}, { RdBlk, RdBlkS, RdBlkM}) {
+ zz_recycleRequest;
+ }
+
+ transition(BBM_O, CtoD) {
+ zz_recycleRequest;
+ }
+
+ transition({BBS_S, BBO_O}, {RdBlkM, CtoD}) {
+ zz_recycleRequest;
+ }
+
+ transition({B_I, CP_I, CP_S, CP_O, CP_OM, CP_SM, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, {RdBlk, RdBlkS, RdBlkM}) {
+ zz_recycleRequest;
+ }
+
+ transition({CP_O, CP_S, CP_OM}, CtoD) {
+ zz_recycleRequest;
+ }
+
+  // Recycle replacement-related messages after a probe has come in.
+ transition({CP_I, B_I, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, {CPUWrite, NoCPUWrite}) {
+ zz_recycleRequest;
+ }
+
+  // After probes have been processed, replacement-related messages just get a null WB ack.
+ transition({I, I_S, I_ES, I_M, I_C, I_W}, {CPUWrite,NoCPUWrite}) {
+ nC_sendNullWBAckToCore;
+ i_popIncomingRequestQueue;
+ }
+  // Cannot ignore a cancel; otherwise the TCP/SQC would be stuck in I_C.
+ transition({I, I_S, I_ES, I_M, I_C, I_W, S_M, M, O, E, S}, CPUWriteCancel){TagArrayRead} {
+ nC_sendNullWBAckToCore;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({CP_I, B_I, CP_IOM, CP_ISM, BRWD_I, BRW_I, BRD_I}, {NoVic, VicClean, VicDirty, VicDirtyLast}){
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+  // Handling probes from the NB. General process: (1) propagate the probe up and go to a blocking state, (2) process acks, (3) on the last ack respond downward.
+
+ // step 1
+ transition({M, O, E, S}, PrbInvData, CP_I){TagArrayRead} {
+ tp_allocateTBE;
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ pp_popProbeQueue;
+ }
+ // step 2a
+ transition(CP_I, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_I, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_I, ProbeAcksComplete, I){TagArrayWrite} {
+ pd_sendProbeResponseData;
+ c_clearOwner;
+ cc_clearSharers;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pt_popTriggerQueue;
+ }
+
+ // step 1
+ transition({M, O, E, S}, PrbInv, B_I){TagArrayWrite} {
+ tp_allocateTBE;
+ ipc_probeInvCore;
+ i2_probeInvL2;
+ pp_popProbeQueue;
+ }
+ // step 2
+ transition(B_I, CPUPrbResp) {
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(B_I, TCCPrbResp) {
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(B_I, ProbeAcksComplete, I){TagArrayWrite} {
+ // send response down to NB
+ pi_sendProbeResponseInv;
+ c_clearOwner;
+ cc_clearSharers;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pt_popTriggerQueue;
+ }
+
+
+ // step 1
+ transition({M, O}, PrbShrData, CP_O){TagArrayRead} {
+ tp_allocateTBE;
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ pp_popProbeQueue;
+ }
+
+ transition(E, PrbShrData, CP_O){TagArrayRead} {
+ tp_allocateTBE;
+ eto_moveExSharerToOwner;
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ pp_popProbeQueue;
+ }
+ // step 2
+ transition(CP_O, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_O, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_O, ProbeAcksComplete, O){TagArrayWrite} {
+ // send response down to NB
+ pd_sendProbeResponseData;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ //step 1
+ transition(S, PrbShrData, CP_S) {
+ tp_allocateTBE;
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ pp_popProbeQueue;
+ }
+ // step 2
+ transition(CP_S, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_S, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_S, ProbeAcksComplete, S) {
+ // send response down to NB
+ pd_sendProbeResponseData;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ // step 1
+ transition(O_M, PrbInvData, CP_IOM) {
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ pp_popProbeQueue;
+ }
+ // step 2a
+ transition(CP_IOM, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_IOM, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_IOM, ProbeAcksComplete, I_M) {
+ pdm_sendProbeResponseDataMs;
+ c_clearOwner;
+ cc_clearSharers;
+ cd_clearDirtyBitTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(CP_IOM, ProbeAcksCompleteReissue, I){TagArrayWrite} {
+ pdm_sendProbeResponseDataMs;
+ c_clearOwner;
+ cc_clearSharers;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pt_popTriggerQueue;
+ }
+
+ // step 1
+ transition(S_M, PrbInvData, CP_ISM) {
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ o_checkForAckCompletion;
+ pp_popProbeQueue;
+ }
+ // step 2a
+ transition(CP_ISM, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_ISM, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_ISM, ProbeAcksComplete, I_M) {
+ pdm_sendProbeResponseDataMs;
+ c_clearOwner;
+ cc_clearSharers;
+ cd_clearDirtyBitTBE;
+
+    //dt_deallocateTBE; // intentionally kept: the TBE is deallocated later, after the core unblock
+ pt_popTriggerQueue;
+ }
+ transition(CP_ISM, ProbeAcksCompleteReissue, I){TagArrayWrite} {
+ pim_sendProbeResponseInvMs;
+ c_clearOwner;
+ cc_clearSharers;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pt_popTriggerQueue;
+ }
+
+ // step 1
+ transition({S_M, O_M}, {PrbInv}, CP_ISM) {
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ pp_popProbeQueue;
+ }
+  // next steps inherited from CP_ISM
+
+ // Simpler cases
+
+ transition({I_C, I_W}, {PrbInvData, PrbInv, PrbShrData}) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+  // If the directory is certain that the block is not present, it can send
+  // an acknowledgement right away; no need for the three-step process.
+ transition(I, {PrbInv,PrbShrData,PrbInvData}){TagArrayRead} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M, I_ES, I_S}, {PrbInv, PrbInvData}) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M, I_ES, I_S}, PrbShrData) {
+ prm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ //step 1
+ transition(S_M, PrbShrData, CP_SM) {
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ o_checkForAckCompletion;
+ pp_popProbeQueue;
+ }
+ // step 2
+ transition(CP_SM, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_SM, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_SM, {ProbeAcksComplete,ProbeAcksCompleteReissue}, S_M){DataArrayRead} {
+ // send response down to NB
+ pd_sendProbeResponseData;
+ pt_popTriggerQueue;
+ }
+
+ //step 1
+ transition(O_M, PrbShrData, CP_OM) {
+ sc_probeShrCoreData;
+ s2_probeShrL2Data;
+ pp_popProbeQueue;
+ }
+ // step 2
+ transition(CP_OM, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+ // step 2b
+ transition(CP_OM, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ // step 3
+ transition(CP_OM, {ProbeAcksComplete,ProbeAcksCompleteReissue}, O_M) {
+ // send response down to NB
+ pd_sendProbeResponseData;
+ pt_popTriggerQueue;
+ }
+
+ transition(BRW_I, PrbInvData, I_W) {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({VM_I,VO_I}, PrbInvData, I_C) {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition(VES_I, {PrbInvData,PrbInv}, I_C) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({VM_I, VO_I, BRW_I}, PrbInv, I_W) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({VM_I, VO_I, VO_S, VES_I, BRW_I}, PrbShrData) {
+ pd_sendProbeResponseData;
+ sf_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition(VO_S, PrbInvData, CP_OSIW) {
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ pp_popProbeQueue;
+ }
+
+ transition(CP_OSIW, TCCPrbResp) {
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+ transition(CP_OSIW, CPUPrbResp) {
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition(CP_OSIW, ProbeAcksComplete, I_C) {
+ pd_sendProbeResponseData;
+ cd_clearDirtyBitTBE;
+ pt_popTriggerQueue;
+ }
+
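+  // A stale victim notification only needs a null WB ack, regardless of the
+  // current state.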
+ transition({I, S, E, O, M, CP_O, CP_S, CP_OM, CP_SM, CP_OSIW, BW_S, BW_E, BW_O, BW_M, I_M, I_ES, I_S, BBS_S, BBO_O, BBM_M, BBM_O, BB_M, BB_O, BB_OO, BB_S, BBS_M, BBO_M, BBO_UM, BBS_UM, S_M, O_M, BBB_S, BBB_M, BBB_E, VES_I, VM_I, VO_I, VO_S, ES_I, MO_I, I_C, I_W}, StaleVic) {
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+ transition({CP_I, B_I, CP_IOM, CP_ISM, BRWD_I, BRW_I, BRD_I}, StaleVic) {
+ nT_sendNullWBAckToTCC;
+ pl_popTCCRequestQueue;
+ }
+
+  // Recall Transitions
+ // transient states still require the directory state
+ transition({M, O}, Recall, BRWD_I) {
+ tr_allocateTBE;
+ vd_victim;
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ }
+
+ transition({E, S}, Recall, BRWD_I) {
+ tr_allocateTBE;
+ vc_victim;
+ dc_probeInvCoreData;
+ d2_probeInvL2Data;
+ }
+
+ transition(I, Recall) {
+ dd_deallocateDir;
+ }
+
+ transition({BRWD_I, BRD_I}, CPUPrbResp) {
+ y_writeDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ pk_popResponseQueue;
+ }
+
+ transition({BRWD_I, BRD_I}, TCCPrbResp) {
+ ty_writeTCCDataToTBE;
+ x_decrementAcks;
+ o_checkForAckCompletion;
+ plr_popTCCResponseQueue;
+ }
+
+ transition(BRWD_I, NB_AckWB, BRD_I) {
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(BRWD_I, ProbeAcksComplete, BRW_I) {
+ pt_popTriggerQueue;
+ }
+
+ transition(BRW_I, NB_AckWB, I) {
+ wb_data;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pR_popResponseFromNBQueue;
+ }
+
+ transition(BRD_I, ProbeAcksComplete, I) {
+ wb_data;
+ dt_deallocateTBE;
+ dd_deallocateDir;
+ pt_popTriggerQueue;
+ }
+
+  // Wait for a stable state before handling Recall
+ transition({BRWD_I,BRD_I,BRW_I,CP_O, CP_S, CP_OM, CP_SM, CP_OSIW, BW_S, BW_E, BW_O, BW_M, I_M, I_ES, I_S, BBS_S, BBO_O, BBM_M, BBM_O, BB_M, BB_O, BB_OO, BB_S, BBS_M, BBO_M, BBO_UM, BBS_UM, S_M, O_M, BBB_S, BBB_M, BBB_E, VES_I, VM_I, VO_I, VO_S, ES_I, MO_I, I_C, I_W, CP_I}, Recall) {
+ zz_recycleRequest; // stall and wait would be for the wrong address
+ ut_updateTag; // try to find an easier recall
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:TCP, "GPU TCP (L1 Data Cache)")
+ : GPUCoalescer* coalescer;
+ Sequencer* sequencer;
+ bool use_seq_not_coal;
+ CacheMemory * L1cache;
+ int TCC_select_num_bits;
+ Cycles issue_latency := 40; // time to send data down to TCC
+ Cycles l2_hit_latency := 18;
+
+ MessageBuffer * requestFromTCP, network="To", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseFromTCP, network="To", virtual_network="3", vnet_type="response";
+ MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
+
+ MessageBuffer * probeToTCP, network="From", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseToTCP, network="From", virtual_network="3", vnet_type="response";
+
+ MessageBuffer * mandatoryQueue;
+{
+ state_declaration(State, desc="TCP Cache States", default="TCP_State_I") {
+ I, AccessPermission:Invalid, desc="Invalid";
+ S, AccessPermission:Read_Only, desc="Shared";
+ E, AccessPermission:Read_Write, desc="Exclusive";
+ O, AccessPermission:Read_Only, desc="Owner state in core, both clusters and other cores may be sharing line";
+ M, AccessPermission:Read_Write, desc="Modified";
+
+ I_M, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
+ I_ES, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
+ S_M, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
+    O_M, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
+
+ ES_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for clean WB ack";
+ MO_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for dirty WB ack";
+
+ MO_PI, AccessPermission:Read_Only, desc="L1 downgrade, waiting for CtoD ack (or ProbeInvalidateData)";
+
+ I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from TCC for canceled WB";
+ }
+
+ enumeration(Event, desc="TCP Events") {
+ // Core initiated
+ Load, desc="Load";
+ Store, desc="Store";
+
+ // TCC initiated
+    TCC_AckS, desc="TCC Ack (Shared) to Core Request";
+    TCC_AckE, desc="TCC Ack (Exclusive) to Core Request";
+    TCC_AckM, desc="TCC Ack (Modified) to Core Request";
+    TCC_AckCtoD, desc="TCC Ack (CtoD) to Core Request";
+ TCC_AckWB, desc="TCC Ack for clean WB";
+ TCC_NackWB, desc="TCC Nack for clean WB";
+
+ // Mem sys initiated
+ Repl, desc="Replacing block from cache";
+
+ // Probe Events
+ PrbInvData, desc="probe, return O or M data";
+ PrbInv, desc="probe, no need for data";
+ LocalPrbInv, desc="local probe, no need for data";
+ PrbShrData, desc="probe downgrade, return O or M data";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+    TagArrayRead, desc="Read the tag array";
+    TagArrayWrite, desc="Write the tag array";
+ }
+
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (diff than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Shared, desc="Victim hit by shared probe";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<TCP_TBE>", constructor="m_number_of_TBEs";
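+  // mapAddressToRange uses these bits, starting just above the block offset,
+  // to pick the destination TCC/TCCdir bank for an address.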
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ // Internal functions
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
+ return cache_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return TCP_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return TCP_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ bool isValid(Addr addr) {
+ AccessPermission perm := getAccessPermission(addr);
+ if (perm == AccessPermission:NotPresent ||
+ perm == AccessPermission:Invalid ||
+ perm == AccessPermission:Busy) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(TCP_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
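+  // Classify the responder relative to this machine (local TCP, peer TCP,
+  // TCC, or TCCdir) so the callbacks below can attribute where data came from.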
+ MachineType getCoherenceType(MachineID myMachID,
+ MachineID senderMachID) {
+ if(myMachID == senderMachID) {
+ return MachineType:TCP;
+ } else if(machineIDToMachineType(senderMachID) == MachineType:TCP) {
+ return MachineType:L1Cache_wCC;
+ } else if(machineIDToMachineType(senderMachID) == MachineType:TCC) {
+ return MachineType:TCC;
+ } else {
+ return MachineType:TCCdir;
+ }
+ }
+
+ // Out Ports
+
+ out_port(requestNetwork_out, CPURequestMsg, requestFromTCP);
+ out_port(responseNetwork_out, ResponseMsg, responseFromTCP);
+ out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
+
+ // In Ports
+
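+  // Decode incoming probes: PrbInv with ReturnData becomes PrbInvData, a
+  // local CtoD-induced invalidation becomes LocalPrbInv, and PrbDowngrade
+  // (which always returns data) becomes PrbShrData.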
+ in_port(probeNetwork_in, TDProbeRequestMsg, probeToTCP) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, TDProbeRequestMsg, block_on="addr") {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ DPRINTF(RubySlicc, "machineID: %s\n", machineID);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ if(in_msg.localCtoD) {
+ trigger(Event:LocalPrbInv, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ assert(in_msg.ReturnData);
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(responseToTCP_in, ResponseMsg, responseToTCP) {
+ if (responseToTCP_in.isReady(clockEdge())) {
+ peek(responseToTCP_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == CoherenceResponseType:TDSysResp) {
+ if (in_msg.State == CoherenceState:Modified) {
+ if (in_msg.CtoD) {
+ trigger(Event:TCC_AckCtoD, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:TCC_AckM, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.State == CoherenceState:Shared) {
+ trigger(Event:TCC_AckS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.State == CoherenceState:Exclusive) {
+ trigger(Event:TCC_AckE, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
+ trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:TDSysWBNack) {
+ trigger(Event:TCC_NackWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
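+  // Core loads/stores: hit or allocate in place when a way is available;
+  // otherwise trigger replacement of the victim line first.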
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ if (in_msg.Type == RubyRequestType:LD) {
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
+ trigger(Event:Load, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else {
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
+ trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(ic_invCache, "ic", desc="invalidate cache") {
+ if(is_valid(cache_entry)) {
+ L1cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(n_issueRdBlk, "n", desc="Issue RdBlk") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkM;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(vd_victim, "vd", desc="Victimize M/O Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ assert(is_valid(cache_entry));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:O) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ out_msg.Dirty := cache_entry.Dirty;
+ }
+ }
+
+ action(vc_victim, "vc", desc="Victimize E/S Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:S) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ action(a_allocate, "a", desc="allocate block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1cache.allocate(address, new Entry));
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.Shared := false;
+ }
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
+ responseToTCP_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(l_loadDone, "l", desc="local load done") {
+ assert(is_valid(cache_entry));
+ if (use_seq_not_coal) {
+ sequencer.readCallback(address, cache_entry.DataBlk,
+ false, MachineType:TCP);
+ } else {
+ coalescer.readCallback(address, MachineType:TCP, cache_entry.DataBlk);
+ }
+ }
+
+ action(xl_loadDone, "xl", desc="remote load done") {
+ peek(responseToTCP_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ if (use_seq_not_coal) {
+ coalescer.recordCPReadCallBack(machineID, in_msg.Sender);
+ sequencer.readCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ } else {
+ MachineType cc_mach_type := getCoherenceType(machineID,
+ in_msg.Sender);
+ coalescer.readCallback(address,
+ cc_mach_type,
+ cache_entry.DataBlk,
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+ }
+
+ action(s_storeDone, "s", desc="local store done") {
+ assert(is_valid(cache_entry));
+ if (use_seq_not_coal) {
+ coalescer.recordCPWriteCallBack(machineID, machineID);
+ sequencer.writeCallback(address, cache_entry.DataBlk,
+ false, MachineType:TCP);
+ } else {
+ coalescer.writeCallback(address, MachineType:TCP, cache_entry.DataBlk);
+ }
+ cache_entry.Dirty := true;
+ }
+
+ action(xs_storeDone, "xs", desc="remote store done") {
+ peek(responseToTCP_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ if (use_seq_not_coal) {
+ coalescer.recordCPWriteCallBack(machineID, in_msg.Sender);
+ sequencer.writeCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ } else {
+ MachineType cc_mach_type := getCoherenceType(machineID,
+ in_msg.Sender);
+ coalescer.writeCallback(address,
+ cc_mach_type,
+ cache_entry.DataBlk,
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ cache_entry.Dirty := true;
+ }
+ }
+
+ action(w_writeCache, "w", desc="write data to cache") {
+ peek(responseToTCP_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
+ peek(responseToTCP_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:StaleNotif;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(wb_data, "wb", desc="write back data") {
+ peek(responseToTCP_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(piu_sendProbeResponseInvUntransferredOwnership, "piu", desc="send probe ack inv, no data, retain ownership") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.UntransferredOwner :=true;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.Dirty := false;
+ out_msg.Ntsl := true;
+ out_msg.Hit := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+      out_msg.Dirty := false; // only true when sending data back
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry) || is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := getDataBlock(address);
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else {
+ out_msg.Dirty := cache_entry.Dirty;
+ }
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.isValid := isValid(address);
+ APPEND_TRANSITION_COMMENT("Sending ack with dirty ");
+ APPEND_TRANSITION_COMMENT(out_msg.Dirty);
+ }
+ }
+
+ action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry) || is_valid(tbe));
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.DataBlk := getDataBlock(address);
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else {
+ out_msg.Dirty := cache_entry.Dirty;
+ }
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.isValid := isValid(address);
+ APPEND_TRANSITION_COMMENT("Sending ack with dirty ");
+ APPEND_TRANSITION_COMMENT(out_msg.Dirty);
+ DPRINTF(RubySlicc, "Data is %s\n", out_msg.DataBlk);
+ }
+ }
+
+ action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
+ assert(is_valid(tbe));
+ tbe.Shared := true;
+ }
+
+ action(mru_updateMRU, "mru", desc="Touch block for replacement policy") {
+ L1cache.setMRU(address);
+ }
+
+ action(uu_sendUnblock, "uu", desc="state changed, unblock") {
+ enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ out_msg.wasValid := isValid(address);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
+ probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ // Transitions
+
+ // transitions from base
+ transition(I, Load, I_ES) {TagArrayRead} {
+ a_allocate;
+ n_issueRdBlk;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Store, I_M) {TagArrayRead, TagArrayWrite} {
+ a_allocate;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, Store, S_M) {TagArrayRead} {
+ mru_updateMRU;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(E, Store, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ mru_updateMRU;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(O, Store, O_M) {TagArrayRead, DataArrayWrite} {
+ mru_updateMRU;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(M, Store) {TagArrayRead, DataArrayWrite} {
+ mru_updateMRU;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ // simple hit transitions
+ transition({S, E, O, M}, Load) {TagArrayRead, DataArrayRead} {
+ l_loadDone;
+ mru_updateMRU;
+ p_popMandatoryQueue;
+ }
+
+ // recycles from transients
+ transition({I_M, I_ES, ES_I, MO_I, S_M, O_M, MO_PI, I_C}, {Load, Store, Repl}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({S, E}, Repl, ES_I) {TagArrayRead} {
+ t_allocateTBE;
+ vc_victim;
+ ic_invCache;
+ }
+
+ transition({O, M}, Repl, MO_I) {TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ vd_victim;
+ ic_invCache;
+ }
+
+ // TD event transitions
+ transition(I_M, {TCC_AckM, TCC_AckCtoD}, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ w_writeCache;
+ xs_storeDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_ES, TCC_AckS, S) {TagArrayWrite, DataArrayWrite} {
+ w_writeCache;
+ xl_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_ES, TCC_AckE, E) {TagArrayWrite, DataArrayWrite} {
+ w_writeCache;
+ xl_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition({S_M, O_M}, TCC_AckM, M) {TagArrayWrite, DataArrayWrite} {
+ xs_storeDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition({MO_I, ES_I}, TCC_NackWB, I){TagArrayWrite} {
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition({MO_I, ES_I}, TCC_AckWB, I) {TagArrayWrite, DataArrayRead} {
+ wb_data;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(I_C, TCC_AckWB, I) {TagArrayWrite} {
+ ss_sendStaleNotification;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(I_C, TCC_NackWB, I) {TagArrayWrite} {
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ // Probe transitions
+ transition({M, O}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(I, PrbInvData) {TagArrayRead, TagArrayWrite} {
+ prm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition({E, S}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInvData, I_C) {} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+  // Needed for TCC-based protocols: must hold on to ownership until the transfer completes.
+ transition({M, O}, LocalPrbInv, MO_PI){TagArrayRead, TagArrayWrite} {
+ piu_sendProbeResponseInvUntransferredOwnership;
+ pp_popProbeQueue;
+ }
+
+ // If there is a race and we see a probe invalidate, handle normally.
+ transition(MO_PI, PrbInvData, I){TagArrayWrite} {
+ pd_sendProbeResponseData;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_PI, PrbInv, I){TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ // normal exit when ownership is successfully transferred
+ transition(MO_PI, TCC_AckCtoD, I) {TagArrayWrite} {
+ ic_invCache;
+ pr_popResponseQueue;
+ }
+
+ transition({M, O, E, S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition({E, S, I}, LocalPrbInv, I){TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+
+ transition({M, E, O}, PrbShrData, O) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_PI, PrbShrData) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+
+ transition(S, PrbShrData, S) {TagArrayRead, DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({I, I_C}, PrbShrData) {TagArrayRead} {
+ prm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M, I_ES}, {PrbInv, PrbInvData}){TagArrayRead} {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ a_allocate; // but make sure there is room for incoming data when it arrives
+ pp_popProbeQueue;
+ }
+
+ transition({I_M, I_ES}, PrbShrData) {} {
+ prm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(S_M, PrbInvData, I_M) {TagArrayRead} {
+ pim_sendProbeResponseInvMs;
+ ic_invCache;
+ a_allocate;
+ pp_popProbeQueue;
+ }
+
+ transition(O_M, PrbInvData, I_M) {TagArrayRead,DataArrayRead} {
+ pdm_sendProbeResponseDataMs;
+ ic_invCache;
+ a_allocate;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M, O_M}, {PrbInv}, I_M) {TagArrayRead} {
+ pim_sendProbeResponseInvMs;
+ ic_invCache;
+ a_allocate;
+ pp_popProbeQueue;
+ }
+
+ transition(S_M, {LocalPrbInv}, I_M) {TagArrayRead} {
+ pim_sendProbeResponseInvMs;
+ ic_invCache;
+ a_allocate;
+ pp_popProbeQueue;
+ }
+
+ transition(O_M, LocalPrbInv, I_M) {TagArrayRead} {
+ piu_sendProbeResponseInvUntransferredOwnership;
+ ic_invCache;
+ a_allocate;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M, O_M}, PrbShrData) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbInvData, I_C){
+ pd_sendProbeResponseData;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInvData, I_C) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInv, I_C) {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbInv, I_C) {
+ pi_sendProbeResponseInv;
+ ic_invCache;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbShrData, ES_I) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ sf_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbShrData, MO_I) {DataArrayRead} {
+ pd_sendProbeResponseData;
+ sf_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+}
--- /dev/null
+protocol "GPU_AMD_Base";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_AMD_Base-msg.sm";
+include "MOESI_AMD_Base-dir.sm";
+include "MOESI_AMD_Base-CorePair.sm";
+include "GPU_RfO-TCP.sm";
+include "GPU_RfO-SQC.sm";
+include "GPU_RfO-TCC.sm";
+include "GPU_RfO-TCCdir.sm";
+include "MOESI_AMD_Base-L3cache.sm";
+include "MOESI_AMD_Base-RegionBuffer.sm";
--- /dev/null
+/*
+ * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Blake Hechtman
+ */
+
+machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
+ : Sequencer* sequencer;
+ CacheMemory * L1cache;
+ int TCC_select_num_bits;
+ Cycles issue_latency := 80; // time to send data down to TCC
+ Cycles l2_hit_latency := 18; // for 1MB L2, 20 for 2MB
+
+ MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";
+
+ MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";
+
+ MessageBuffer * mandatoryQueue;
+{
+ state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
+ I, AccessPermission:Invalid, desc="Invalid";
+ V, AccessPermission:Read_Only, desc="Valid";
+ }
+
+ enumeration(Event, desc="SQC Events") {
+ // Core initiated
+ Fetch, desc="Fetch";
+ // Mem sys initiated
+ Repl, desc="Replacing block from cache";
+ Data, desc="Received Data";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+    TagArrayRead, desc="Read the tag array";
+    TagArrayWrite, desc="Write the tag array";
+ }
+
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different from memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Shared, desc="Victim hit by shared probe";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ // Internal functions
+ Tick clockEdge();
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
+ return cache_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes +
+ functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return SQC_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return SQC_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(SQC_State_to_permission(state));
+ }
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
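+
+  // Note: reads and writes map onto the same resource class above, so
+  // availability is modeled per array (tag vs. data), not per access
+  // direction.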
+
+ // Out Ports
+
+ out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);
+
+ // In Ports
+
+ in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
+ if (responseToSQC_in.isReady(clockEdge())) {
+ peek(responseToSQC_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == CoherenceResponseType:TDSysResp) {
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.addr)) {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.addr);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
+
+ assert(in_msg.Type == RubyRequestType:IFETCH);
+ trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
+ }
+ }
+ }
+
+ // Actions
+
+ action(ic_invCache, "ic", desc="invalidate cache") {
+ if(is_valid(cache_entry)) {
+ L1cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(a_allocate, "a", desc="allocate block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1cache.allocate(address, new Entry));
+ }
+ }
+
+ action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
+ responseToSQC_in.dequeue(clockEdge());
+ }
+
+ action(l_loadDone, "l", desc="local load done") {
+ assert(is_valid(cache_entry));
+ sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
+ APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
+ }
+
+ action(w_writeCache, "w", desc="write data to cache") {
+ peek(responseToSQC_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := false;
+ }
+ }
+
+ // Transitions
+
+ // transitions from base
+ transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} {
+    ic_invCache;
+ }
+
+ transition(I, Data, V) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ a_allocate;
+    w_writeCache;
+ l_loadDone;
+ pr_popResponseQueue;
+ }
+
+ transition(I, Fetch) {TagArrayRead, TagArrayWrite} {
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ // simple hit transitions
+ transition(V, Fetch) {TagArrayRead, DataArrayRead} {
+ l_loadDone;
+ p_popMandatoryQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Blake Hechtman
+ */
+
+machine(MachineType:TCC, "TCC Cache")
+ : CacheMemory * L2cache;
+ bool WB; /*is this cache Writeback?*/
+ Cycles l2_request_latency := 50;
+ Cycles l2_response_latency := 20;
+
+ // From the TCPs or SQCs
+ MessageBuffer * requestFromTCP, network="From", virtual_network="1", vnet_type="request";
+ // To the Cores. TCC deals only with TCPs/SQCs.
+ MessageBuffer * responseToCore, network="To", virtual_network="3", vnet_type="response";
+ // From the NB
+ MessageBuffer * probeFromNB, network="From", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseFromNB, network="From", virtual_network="2", vnet_type="response";
+ // To the NB
+ MessageBuffer * requestToNB, network="To", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseToNB, network="To", virtual_network="2", vnet_type="response";
+ MessageBuffer * unblockToNB, network="To", virtual_network="4", vnet_type="unblock";
+
+ MessageBuffer * triggerQueue;
+
+{
+ // EVENTS
+ enumeration(Event, desc="TCC Events") {
+ // Requests coming from the Cores
+ RdBlk, desc="RdBlk event";
+ WrVicBlk, desc="L1 Write Through";
+    WrVicBlkBack, desc="L1 Write Through (dirty cache)";
+ Atomic, desc="Atomic Op";
+ AtomicDone, desc="AtomicOps Complete";
+ AtomicNotDone, desc="AtomicOps not Complete";
+    Data, desc="data message";
+ // Coming from this TCC
+ L2_Repl, desc="L2 Replacement";
+ // Probes
+ PrbInv, desc="Invalidating probe";
+ // Coming from Memory Controller
+ WBAck, desc="writethrough ack from memory";
+ }
+
+ // STATES
+ state_declaration(State, desc="TCC State", default="TCC_State_I") {
+ M, AccessPermission:Read_Write, desc="Modified(dirty cache only)";
+ W, AccessPermission:Read_Write, desc="Written(dirty cache only)";
+ V, AccessPermission:Read_Only, desc="Valid";
+ I, AccessPermission:Invalid, desc="Invalid";
+ IV, AccessPermission:Busy, desc="Waiting for Data";
+ WI, AccessPermission:Busy, desc="Waiting on Writethrough Ack";
+    A, AccessPermission:Busy, desc="Invalid, waiting on atomic Data";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+    TagArrayRead, desc="Read the tag array";
+    TagArrayWrite, desc="Write the tag array";
+ }
+
+
+ // STRUCTURES
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different from memory)?";
+ DataBlock DataBlk, desc="Data for the block";
+ WriteMask writeMask, desc="Dirty byte mask";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ bool Shared, desc="Victim hit by shared probe";
+ MachineID From, desc="Waiting for writeback from...";
+ NetDest Destination, desc="Data destination";
+ int numAtomics, desc="number remaining atomics";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // FUNCTION DEFINITIONS
+ Tick clockEdge();
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L2cache.lookup(addr));
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ return getCacheEntry(addr).DataBlk;
+ }
+
+ bool presentOrAvail(Addr addr) {
+ return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes +
+ functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return TCC_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return TCC_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(TCC_State_to_permission(state));
+ }
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+
+ // ** OUT_PORTS **
+
+ // Three classes of ports
+ // Class 1: downward facing network links to NB
+ out_port(requestToNB_out, CPURequestMsg, requestToNB);
+ out_port(responseToNB_out, ResponseMsg, responseToNB);
+ out_port(unblockToNB_out, UnblockMsg, unblockToNB);
+
+ // Class 2: upward facing ports to GPU cores
+ out_port(responseToCore_out, ResponseMsg, responseToCore);
+
+  // Class 3: internal trigger queue for atomic completion
+  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+  // ** IN_PORTS **
+  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (tbe.numAtomics == 0) {
+ trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
+ if (responseFromNB_in.isReady(clockEdge())) {
+ peek(responseFromNB_in, ResponseMsg, block_on="addr") {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:NBSysResp) {
+ if(presentOrAvail(in_msg.addr)) {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.addr);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ // Finally handling incoming requests (from TCP) and probes (from NB).
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+
+ in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
+ if (coreRequestNetwork_in.isReady(clockEdge())) {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ if(WB) {
+ if(presentOrAvail(in_msg.addr)) {
+ trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.addr);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else {
+ trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:Atomic) {
+ trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+          error("Unexpected Request Message to TCC");
+ }
+ }
+ }
+ }
+ // BEGIN ACTIONS
+
+ action(i_invL2, "i", desc="invalidate TCC cache block") {
+ if (is_valid(cache_entry)) {
+ L2cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(sd_sendData, "sd", desc="send Shared response") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+
+ action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination := tbe.Destination;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ enqueue(unblockToNB_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+
+  action(rd_requestData, "r", desc="Miss in L2, pass on") {
+    if (tbe.Destination.count() == 1) {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Shared := false; // unneeded for this request
+ out_msg.MessageSize := in_msg.MessageSize;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+ }
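+
+  // Note: rd_requestData issues the NB request only for the first
+  // requestor (tbe.Destination.count() == 1); later readers of the same
+  // line merge into the existing TBE via t_allocateTBE and are all
+  // answered by the single sdr_sendDataResponse multicast.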
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(responseFromNB_in, ResponseMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Destination.clear();
+ out_msg.Destination.add(in_msg.WTRequestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(swb_sendWBAck, "swb", desc="send WB Ack") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Destination.clear();
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
+ peek(responseFromNB_in, ResponseMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Destination.add(in_msg.WTRequestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ }
+ }
+ }
+
+ action(a_allocateBlock, "a", desc="allocate TCC block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ cache_entry.writeMask.clear();
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ if (is_invalid(tbe)) {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.Destination.clear();
+ tbe.numAtomics := 0;
+ }
+ if (coreRequestNetwork_in.isReady(clockEdge())) {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ if(in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic){
+ tbe.Destination.add(in_msg.Requestor);
+ }
+ }
+ }
+ }
+
+ action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
+ tbe.Destination.clear();
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
+ peek(responseFromNB_in, ResponseMsg) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
+ }
+ }
+
+ action(wdb_writeDirtyBytes, "wdb", desc="write data to TCC") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ cache_entry.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
+ cache_entry.writeMask.orMask(in_msg.writeMask);
+ DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
+ }
+ }
+
+  action(wt_writeThrough, "wt", desc="write through data") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.Requestor;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:WriteThrough;
+ out_msg.Dirty := true;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.writeMask.orMask(in_msg.writeMask);
+ }
+ }
+ }
+
+ action(wb_writeBack, "wb", desc="write back data") {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:WriteThrough;
+ out_msg.Dirty := true;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.writeMask.orMask(cache_entry.writeMask);
+ }
+ }
+
+  action(at_atomicThrough, "at", desc="send atomic to NB") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.Requestor;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:Atomic;
+ out_msg.Dirty := true;
+ out_msg.writeMask.orMask(in_msg.writeMask);
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseToNB_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
+ L2cache.setMRU(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ coreRequestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseFromNB_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(z_stall, "z", desc="stall") {
+ // built-in
+ }
+
+
+ action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
+ tbe.numAtomics := tbe.numAtomics + 1;
+ }
+
+
+  action(dna_decrementNumAtomics, "dna", desc="dec num atomics") {
+ tbe.numAtomics := tbe.numAtomics - 1;
+ if (tbe.numAtomics==0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AtomicDone;
+ }
+ }
+ }
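+
+  // Illustrative trace: with three atomics outstanding, numAtomics is 3
+  // and each returning Data runs dna (3 -> 2 -> 1 -> 0); only the final
+  // decrement enqueues the AtomicDone trigger that retires the block
+  // from A to I.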
+
+ action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ // END ACTIONS
+
+ // BEGIN TRANSITIONS
+ // transitions from base
+ // Assumptions for ArrayRead/Write
+ // TBE checked before tags
+ // Data Read/Write requires Tag Read
+
+ // Stalling transitions do NOT check the tag array...and if they do,
+ // they can cause a resource stall deadlock!
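+  // (Sketch of the reasoning: a z_stall transition re-fires every cycle
+  // its request sits at the head of the queue; if it also claimed array
+  // bandwidth, it would contend with, and could starve, the very
+  // transition that is supposed to unblock it.)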
+
+ transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) { //TagArrayRead} {
+ z_stall;
+ }
+ transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) { //TagArrayRead} {
+ z_stall;
+ }
+ transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) { //TagArrayRead} {
+ z_stall;
+ }
+ transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
+ sd_sendData;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+ transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ wb_writeBack;
+ }
+
+ transition(I, RdBlk, IV) {TagArrayRead} {
+ t_allocateTBE;
+ rd_requestData;
+ p_popRequestQueue;
+ }
+
+ transition(IV, RdBlk) {
+ t_allocateTBE;
+ rd_requestData;
+ p_popRequestQueue;
+ }
+
+  transition({V, I}, Atomic, A) {TagArrayRead} {
+ i_invL2;
+ t_allocateTBE;
+ at_atomicThrough;
+ ina_incrementNumAtomics;
+ p_popRequestQueue;
+ }
+
+ transition(A, Atomic) {
+ at_atomicThrough;
+ ina_incrementNumAtomics;
+ p_popRequestQueue;
+ }
+
+ transition({M, W}, Atomic, WI) {TagArrayRead} {
+ t_allocateTBE;
+ wb_writeBack;
+ }
+
+ transition(I, WrVicBlk) {TagArrayRead} {
+ wt_writeThrough;
+ p_popRequestQueue;
+ }
+
+ transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
+ ut_updateTag;
+ wdb_writeDirtyBytes;
+ wt_writeThrough;
+ p_popRequestQueue;
+ }
+
+ transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ ut_updateTag;
+ swb_sendWBAck;
+ wdb_writeDirtyBytes;
+ p_popRequestQueue;
+ }
+
+ transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ ut_updateTag;
+ swb_sendWBAck;
+ wdb_writeDirtyBytes;
+ p_popRequestQueue;
+ }
+
+ transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocateBlock;
+ ut_updateTag;
+ swb_sendWBAck;
+ wdb_writeDirtyBytes;
+ p_popRequestQueue;
+ }
+
+ transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ wb_writeBack;
+ i_invL2;
+ }
+
+ transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
+ i_invL2;
+ }
+
+ transition({A, IV, WI}, L2_Repl) {
+ i_invL2;
+ }
+
+ transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(W, PrbInv) {TagArrayRead} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({A, IV, WI}, PrbInv) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocateBlock;
+ ut_updateTag;
+ wcb_writeCacheBlock;
+ sdr_sendDataResponse;
+ pr_popResponseQueue;
+ dt_deallocateTBE;
+ }
+
+ transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocateBlock;
+ ar_sendAtomicResponse;
+ dna_decrementNumAtomics;
+ pr_popResponseQueue;
+ }
+
+ transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
+ dt_deallocateTBE;
+ ptr_popTriggerQueue;
+ }
+
+ transition(A, AtomicNotDone) {TagArrayRead} {
+ ptr_popTriggerQueue;
+ }
+
+  // M, W should not see WBAck since the cache is in WB mode
+  // WBAcks do not need to check tags
+ transition({I, V, IV, A}, WBAck) {
+ w_sendResponseWBAck;
+ pr_popResponseQueue;
+ }
+
+  transition(WI, WBAck, I) {
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Blake Hechtman
+ */
+
+machine(MachineType:TCP, "GPU TCP (L1 Data Cache)")
+ : VIPERCoalescer* coalescer;
+ Sequencer* sequencer;
+ bool use_seq_not_coal;
+ CacheMemory * L1cache;
+ bool WB; /*is this cache Writeback?*/
+ bool disableL1; /* bypass L1 cache? */
+ int TCC_select_num_bits;
+ Cycles issue_latency := 40; // time to send data down to TCC
+ Cycles l2_hit_latency := 18;
+
+ MessageBuffer * requestFromTCP, network="To", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseFromTCP, network="To", virtual_network="3", vnet_type="response";
+ MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
+
+ MessageBuffer * probeToTCP, network="From", virtual_network="1", vnet_type="request";
+ MessageBuffer * responseToTCP, network="From", virtual_network="3", vnet_type="response";
+ MessageBuffer * mandatoryQueue;
+
+{
+ state_declaration(State, desc="TCP Cache States", default="TCP_State_I") {
+ I, AccessPermission:Invalid, desc="Invalid";
+ V, AccessPermission:Read_Only, desc="Valid";
+ W, AccessPermission:Read_Write, desc="Written";
+ M, AccessPermission:Read_Write, desc="Written and Valid";
+    L, AccessPermission:Read_Write, desc="Local access is modifiable";
+ A, AccessPermission:Invalid, desc="Waiting on Atomic";
+ }
+
+ enumeration(Event, desc="TCP Events") {
+ // Core initiated
+ Load, desc="Load";
+ Store, desc="Store to L1 (L1 is dirty)";
+    StoreThrough, desc="Store directly to L2 (L1 is clean)";
+    StoreLocal, desc="Store to L1 but L1 is clean";
+    Atomic, desc="Atomic";
+    Flush, desc="Flush if dirty (wbL1 for Store Release)";
+    Evict, desc="Evict if clean (invL1 for Load Acquire)";
+ // Mem sys initiated
+ Repl, desc="Replacing block from cache";
+
+ // TCC initiated
+ TCC_Ack, desc="TCC Ack to Core Request";
+ TCC_AckWB, desc="TCC Ack for WB";
+ // Disable L1 cache
+ Bypass, desc="Bypass the entire L1 cache";
+ }
+
+ enumeration(RequestType,
+ desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+    TagArrayRead, desc="Read the tag array";
+    TagArrayWrite, desc="Write the tag array";
+    TagArrayFlash, desc="Flash clear the tag array";
+ }
+
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+    bool Dirty, desc="Is the data dirty (different from memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+    WriteMask writeMask, desc="mask of written bytes";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Shared, desc="Victim hit by shared probe";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<TCP_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ int WTcnt, default="0";
+ int Fcnt, default="0";
+ bool inFlush, default="false";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ // Internal functions
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
+ return cache_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes +
+ functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return TCP_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return TCP_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ bool isValid(Addr addr) {
+ AccessPermission perm := getAccessPermission(addr);
+ if (perm == AccessPermission:NotPresent ||
+ perm == AccessPermission:Invalid ||
+ perm == AccessPermission:Busy) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(TCP_State_to_permission(state));
+ }
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayFlash) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayFlash) {
+ // FIXME should check once per cache, rather than once per cacheline
+ return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ // Out Ports
+
+ out_port(requestNetwork_out, CPURequestMsg, requestFromTCP);
+
+ // In Ports
+
+ in_port(responseToTCP_in, ResponseMsg, responseToTCP) {
+ if (responseToTCP_in.isReady(clockEdge())) {
+ peek(responseToTCP_in, ResponseMsg, block_on="addr") {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:TDSysResp) {
+ // disable L1 cache
+ if (disableL1) {
+ trigger(Event:Bypass, in_msg.addr, cache_entry, tbe);
+ } else {
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.addr)) {
+ trigger(Event:TCC_Ack, in_msg.addr, cache_entry, tbe);
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.addr);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck ||
+ in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:Load, in_msg.LineAddress, cache_entry, tbe);
+ } else if (in_msg.Type == RubyRequestType:ATOMIC) {
+ trigger(Event:Atomic, in_msg.LineAddress, cache_entry, tbe);
+ } else if (in_msg.Type == RubyRequestType:ST) {
+ if(disableL1) {
+ trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
+ if (in_msg.segment == HSASegment:SPILL) {
+ trigger(Event:StoreLocal, in_msg.LineAddress, cache_entry, tbe);
+ } else if (WB) {
+ trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } // end if (disableL1)
+ } else if (in_msg.Type == RubyRequestType:FLUSH) {
+ trigger(Event:Flush, in_msg.LineAddress, cache_entry, tbe);
+ } else if (in_msg.Type == RubyRequestType:REPLACEMENT){
+ trigger(Event:Evict, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+          error("Unexpected Request Message from VIC");
+          // NOTE: the code below is unreachable; error() terminates the simulation.
+ if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
+ if (WB) {
+ trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(ic_invCache, "ic", desc="invalidate cache") {
+ if(is_valid(cache_entry)) {
+ cache_entry.writeMask.clear();
+ L1cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(n_issueRdBlk, "n", desc="Issue RdBlk") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(rb_bypassDone, "rb", desc="bypass L1 of read access") {
+ peek(responseToTCP_in, ResponseMsg) {
+      DataBlock tmp := in_msg.DataBlk;
+ if (use_seq_not_coal) {
+ sequencer.readCallback(address, tmp, false, MachineType:L1Cache);
+ } else {
+ coalescer.readCallback(address, MachineType:L1Cache, tmp);
+ }
+ if(is_valid(cache_entry)) {
+ unset_cache_entry();
+ }
+ }
+ }
+
+ action(wab_bypassDone, "wab", desc="bypass L1 of write access") {
+ peek(responseToTCP_in, ResponseMsg) {
+ DataBlock tmp := in_msg.DataBlk;
+ if (use_seq_not_coal) {
+ sequencer.writeCallback(address, tmp, false, MachineType:L1Cache);
+ } else {
+ coalescer.writeCallback(address, MachineType:L1Cache, tmp);
+ }
+ }
+ }
+
+ action(norl_issueRdBlkOrloadDone, "norl", desc="local load done") {
+ peek(mandatoryQueue_in, RubyRequest){
+ if (cache_entry.writeMask.cmpMask(in_msg.writeMask)) {
+ if (use_seq_not_coal) {
+ sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
+ } else {
+ coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
+ }
+ } else {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+ }
+ }
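+
+  // Note: the load completes locally only when cmpMask reports that the
+  // requested bytes are covered by this TCP's write mask (assumed
+  // semantics of WriteMask::cmpMask); otherwise it falls through to a
+  // fresh RdBlk to the TCC.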
+
+ action(wt_writeThrough, "wt", desc="Flush dirty data") {
+ WTcnt := WTcnt + 1;
+ APPEND_TRANSITION_COMMENT("write++ = ");
+ APPEND_TRANSITION_COMMENT(WTcnt);
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ assert(is_valid(cache_entry));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.writeMask.clear();
+ out_msg.writeMask.orMask(cache_entry.writeMask);
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:WriteThrough;
+ out_msg.InitialRequestTime := curCycle();
+ out_msg.Shared := false;
+ }
+ }
+
+ action(at_atomicThrough, "at", desc="send Atomic") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.writeMask.clear();
+ out_msg.writeMask.orMask(in_msg.writeMask);
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:Atomic;
+ out_msg.InitialRequestTime := curCycle();
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ action(a_allocate, "a", desc="allocate block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1cache.allocate(address, new Entry));
+ }
+ cache_entry.writeMask.clear();
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ }
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(sf_setFlush, "sf", desc="set flush") {
+ inFlush := true;
+ APPEND_TRANSITION_COMMENT(" inFlush is true");
+ }
+
+ action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
+ responseToTCP_in.dequeue(clockEdge());
+ }
+
+ action(l_loadDone, "l", desc="local load done") {
+ assert(is_valid(cache_entry));
+ if (use_seq_not_coal) {
+ sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
+ } else {
+ coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
+ }
+ }
+
+ action(s_storeDone, "s", desc="local store done") {
+ assert(is_valid(cache_entry));
+
+ if (use_seq_not_coal) {
+ sequencer.writeCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
+ } else {
+ coalescer.writeCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
+ }
+ cache_entry.Dirty := true;
+ }
+
+ action(inv_invDone, "inv", desc="local inv done") {
+ if (use_seq_not_coal) {
+ DPRINTF(RubySlicc, "Sequencer does not define invCallback!\n");
+ assert(false);
+ } else {
+ coalescer.invCallback(address);
+ }
+ }
+
+ action(wb_wbDone, "wb", desc="local wb done") {
+ if (inFlush == true) {
+ Fcnt := Fcnt + 1;
+ if (Fcnt > WTcnt) {
+ if (use_seq_not_coal) {
+ DPRINTF(RubySlicc, "Sequencer does not define wbCallback!\n");
+ assert(false);
+ } else {
+ coalescer.wbCallback(address);
+ }
+ Fcnt := Fcnt - 1;
+ }
+ if (WTcnt == 0 && Fcnt == 0) {
+ inFlush := false;
+ APPEND_TRANSITION_COMMENT(" inFlush is false");
+ }
+ }
+ }
+
+ action(wd_wtDone, "wd", desc="writethrough done") {
+ WTcnt := WTcnt - 1;
+ if (inFlush == true) {
+ Fcnt := Fcnt -1;
+ }
+ assert(WTcnt >= 0);
+ APPEND_TRANSITION_COMMENT("write-- = ");
+ APPEND_TRANSITION_COMMENT(WTcnt);
+ }
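+
+  // Flush accounting, as implemented above: wt_writeThrough bumps WTcnt;
+  // each TCC_AckWB runs wd_wtDone (WTcnt--, and Fcnt-- while flushing)
+  // and then wb_wbDone (Fcnt++); the coalescer's wbCallback fires only
+  // once Fcnt exceeds the still-outstanding WTcnt, and inFlush clears
+  // when both counters drain to zero.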
+
+ action(dw_dirtyWrite, "dw", desc="update write mask"){
+ peek(mandatoryQueue_in, RubyRequest) {
+ cache_entry.DataBlk.copyPartial(in_msg.WTData,in_msg.writeMask);
+ cache_entry.writeMask.orMask(in_msg.writeMask);
+ }
+ }
+ action(w_writeCache, "w", desc="write data to cache") {
+ peek(responseToTCP_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ DataBlock tmp := in_msg.DataBlk;
+ tmp.copyPartial(cache_entry.DataBlk,cache_entry.writeMask);
+ cache_entry.DataBlk := tmp;
+ }
+ }
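+
+  // Note: the reply is merged under the local write mask, so bytes this
+  // TCP wrote while the request was outstanding are preserved and only
+  // clean bytes are refreshed from the TCC's copy.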
+
+ action(mru_updateMRU, "mru", desc="Touch block for replacement policy") {
+ L1cache.setMRU(address);
+ }
+
+// action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
+// mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+// }
+
+ action(z_stall, "z", desc="stall; built-in") {
+    // built-in action
+ }
+
+ // Transitions
+ // ArrayRead/Write assumptions:
+ // All requests read Tag Array
+  // TBE allocation writes the TagArray when the line moves to I
+ // TBE only checked on misses
+ // Stores will also write dirty bits in the tag
+  // WriteThroughs still use the cache entry as a staging buffer for the wavefront
+
+ // Stalling transitions do NOT check the tag array...and if they do,
+ // they can cause a resource stall deadlock!
+
+ transition({A}, {Load, Store, Atomic, StoreThrough}) { //TagArrayRead} {
+ z_stall;
+ }
+
+ transition({M, V, L}, Load) {TagArrayRead, DataArrayRead} {
+ l_loadDone;
+ mru_updateMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Load) {TagArrayRead} {
+ n_issueRdBlk;
+ p_popMandatoryQueue;
+ }
+
+ transition({V, I}, Atomic, A) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ mru_updateMRU;
+ at_atomicThrough;
+ p_popMandatoryQueue;
+ }
+
+ transition({M, W}, Atomic, A) {TagArrayRead, TagArrayWrite} {
+ wt_writeThrough;
+ t_allocateTBE;
+ at_atomicThrough;
+ ic_invCache;
+ }
+
+ transition(W, Load, I) {TagArrayRead, DataArrayRead} {
+ wt_writeThrough;
+ norl_issueRdBlkOrloadDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({I}, StoreLocal, L) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocate;
+ dw_dirtyWrite;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({L, V}, StoreLocal, L) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ dw_dirtyWrite;
+ mru_updateMRU;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Store, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocate;
+ dw_dirtyWrite;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(V, Store, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ dw_dirtyWrite;
+ mru_updateMRU;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({M, W}, Store) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ dw_dirtyWrite;
+ mru_updateMRU;
+ s_storeDone;
+ p_popMandatoryQueue;
+ }
+
+  // M, W should not see StoreThrough
+ transition(I, StoreThrough) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocate;
+ dw_dirtyWrite;
+ s_storeDone;
+ wt_writeThrough;
+ ic_invCache;
+ p_popMandatoryQueue;
+ }
+
+  transition({V, L}, StoreThrough, I) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ dw_dirtyWrite;
+ s_storeDone;
+ wt_writeThrough;
+ ic_invCache;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, TCC_Ack, V) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} {
+ a_allocate;
+ w_writeCache;
+ l_loadDone;
+ pr_popResponseQueue;
+ }
+
+ transition(I, Bypass, I) {
+ rb_bypassDone;
+ pr_popResponseQueue;
+ }
+
+  transition(A, Bypass, I) {
+ d_deallocateTBE;
+ wab_bypassDone;
+ pr_popResponseQueue;
+ }
+
+ transition(A, TCC_Ack, I) {TagArrayRead, DataArrayRead, DataArrayWrite} {
+ d_deallocateTBE;
+ a_allocate;
+ w_writeCache;
+ s_storeDone;
+ pr_popResponseQueue;
+ ic_invCache;
+ }
+
+ transition(V, TCC_Ack, V) {TagArrayRead, DataArrayRead, DataArrayWrite} {
+ w_writeCache;
+ l_loadDone;
+ pr_popResponseQueue;
+ }
+
+ transition({W, M}, TCC_Ack, M) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} {
+ w_writeCache;
+ l_loadDone;
+ pr_popResponseQueue;
+ }
+
+ transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} {
+ ic_invCache;
+ }
+
+ transition({A}, Repl) {TagArrayRead, TagArrayWrite} {
+ ic_invCache;
+ }
+
+ transition({W, M}, Repl, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ wt_writeThrough;
+ ic_invCache;
+ }
+
+ transition(L, Repl, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ wt_writeThrough;
+ ic_invCache;
+ }
+
+ transition({W, M}, Flush, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ sf_setFlush;
+ wt_writeThrough;
+ ic_invCache;
+ p_popMandatoryQueue;
+ }
+
+  transition({V, I, A, L}, Flush) {TagArrayFlash} {
+ sf_setFlush;
+ wb_wbDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({I, V}, Evict, I) {TagArrayFlash} {
+ inv_invDone;
+ p_popMandatoryQueue;
+ ic_invCache;
+ }
+
+ transition({W, M}, Evict, W) {TagArrayFlash} {
+ inv_invDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({A, L}, Evict) {TagArrayFlash} {
+ inv_invDone;
+ p_popMandatoryQueue;
+ }
+
+ // TCC_AckWB only snoops TBE
+ transition({V, I, A, M, W, L}, TCC_AckWB) {
+ wd_wtDone;
+ wb_wbDone;
+ pr_popResponseQueue;
+ }
+}
--- /dev/null
+protocol "GPU_VIPER";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_AMD_Base-msg.sm";
+include "MOESI_AMD_Base-dir.sm";
+include "MOESI_AMD_Base-CorePair.sm";
+include "GPU_VIPER-TCP.sm";
+include "GPU_VIPER-SQC.sm";
+include "GPU_VIPER-TCC.sm";
+include "MOESI_AMD_Base-L3cache.sm";
--- /dev/null
+protocol "GPU_VIPER";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_AMD_Base-msg.sm";
+include "MOESI_AMD_Base-probeFilter.sm";
+include "MOESI_AMD_Base-CorePair.sm";
+include "GPU_VIPER-TCP.sm";
+include "GPU_VIPER-SQC.sm";
+include "GPU_VIPER-TCC.sm";
+include "MOESI_AMD_Base-L3cache.sm";
--- /dev/null
+/*
+ * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Sooraj Puthoor, Blake Hechtman
+ */
+
+/*
+ * This file is inherited from GPU_VIPER-TCC.sm and retains its structure.
+ * There are very few modifications in this file from the original VIPER TCC.
+ */
+
+machine(MachineType:TCC, "TCC Cache")
+ : CacheMemory * L2cache;
+ bool WB; /* is this cache write-back? */
+ int regionBufferNum;
+ Cycles l2_request_latency := 50;
+ Cycles l2_response_latency := 20;
+
+ // From the TCPs or SQCs
+ MessageBuffer * requestFromTCP, network="From", virtual_network="1", ordered="true", vnet_type="request";
+ // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
+ MessageBuffer * responseToCore, network="To", virtual_network="3", ordered="true", vnet_type="response";
+ // From the NB
+ MessageBuffer * probeFromNB, network="From", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseFromNB, network="From", virtual_network="2", ordered="false", vnet_type="response";
+ // To the NB
+ MessageBuffer * requestToNB, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseToNB, network="To", virtual_network="2", ordered="false", vnet_type="response";
+ MessageBuffer * unblockToNB, network="To", virtual_network="4", ordered="false", vnet_type="unblock";
+
+ MessageBuffer * triggerQueue, ordered="true", random="false";
+{
+ // EVENTS
+ enumeration(Event, desc="TCC Events") {
+ // Requests coming from the Cores
+ RdBlk, desc="RdBlk event";
+ WrVicBlk, desc="L1 Write Through";
+ WrVicBlkBack, desc="L1 Write Back (dirty cache)";
+ Atomic, desc="Atomic Op";
+ AtomicDone, desc="AtomicOps Complete";
+ AtomicNotDone, desc="AtomicOps not Complete";
+ Data, desc="data message";
+ // Coming from this TCC
+ L2_Repl, desc="L2 Replacement";
+ // Probes
+ PrbInv, desc="Invalidating probe";
+ // Coming from Memory Controller
+ WBAck, desc="writethrough ack from memory";
+ }
+
+ // STATES
+ state_declaration(State, desc="TCC State", default="TCC_State_I") {
+ M, AccessPermission:Read_Write, desc="Modified (dirty cache only)";
+ W, AccessPermission:Read_Write, desc="Written (dirty cache only)";
+ V, AccessPermission:Read_Only, desc="Valid";
+ I, AccessPermission:Invalid, desc="Invalid";
+ IV, AccessPermission:Busy, desc="Waiting for Data";
+ WI, AccessPermission:Busy, desc="Waiting on Writethrough Ack";
+ A, AccessPermission:Busy, desc="Invalid waiting on atomic Data";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+ TagArrayRead, desc="Read the tag array";
+ TagArrayWrite, desc="Write the tag array";
+ }
+
+
+ // STRUCTURES
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (diff from memory?)";
+ DataBlock DataBlk, desc="Data for the block";
+ WriteMask writeMask, desc="Dirty byte mask";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ bool Shared, desc="Victim hit by shared probe";
+ MachineID From, desc="Waiting for writeback from...";
+ NetDest Destination, desc="Data destination";
+ int numAtomics, desc="number of remaining atomics";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // FUNCTION DEFINITIONS
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
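+ // Downstream requests from this TCC are addressed to its RegionBuffer
+ // (see getPeer below) rather than directly to the directory.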
+ MachineID getPeer(MachineID mach) {
+ return createMachineID(MachineType:RegionBuffer, intToID(regionBufferNum));
+ }
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L2cache.lookup(addr));
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ return getCacheEntry(addr).DataBlk;
+ }
+
+ bool presentOrAvail(Addr addr) {
+ return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
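+ // Functional reads that miss in the TBE are serviced by a functional
+ // memory access; the cache array itself is not searched here.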
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes +
+ functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return TCC_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return TCC_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(TCC_State_to_permission(state));
+ }
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayRead,addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayWrite,addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayRead,addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayWrite,addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+
+ // ** OUT_PORTS **
+
+ // Three classes of ports
+ // Class 1: downward facing network links to NB
+ out_port(requestToNB_out, CPURequestMsg, requestToNB);
+ out_port(responseToNB_out, ResponseMsg, responseToNB);
+ out_port(unblockToNB_out, UnblockMsg, unblockToNB);
+
+ // Class 2: upward facing ports to GPU cores
+ out_port(responseToCore_out, ResponseMsg, responseToCore);
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+
+// ** IN_PORTS **
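+ // The trigger queue is internal to this controller: when
+ // dna_decrementNumAtomics drops tbe.numAtomics to zero it enqueues an
+ // AtomicDone trigger, which this port turns into the AtomicDone event.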
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (tbe.numAtomics == 0) {
+ trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+
+
+ in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
+ if (responseFromNB_in.isReady(clockEdge())) {
+ peek(responseFromNB_in, ResponseMsg, block_on="addr") {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:NBSysResp) {
+ if(presentOrAvail(in_msg.addr)) {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.addr);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ // Finally handling incoming requests (from TCP) and probes (from NB).
+
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ DPRINTF(RubySlicc, "machineID: %s\n", machineID);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+
+
+ in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
+ if (coreRequestNetwork_in.isReady(clockEdge())) {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ if(WB) {
+ if(presentOrAvail(in_msg.addr)) {
+ trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.addr);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else {
+ trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:Atomic) {
+ trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+ // BEGIN ACTIONS
+
+ action(i_invL2, "i", desc="invalidate TCC cache block") {
+ if (is_valid(cache_entry)) {
+ L2cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ // Data available at TCC. Send the DATA to TCP
+ action(sd_sendData, "sd", desc="send Shared response") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+
+ // Data was not available at TCC. So, TCC forwarded the request to
+ // directory and directory responded back with data. Now, forward the
+ // DATA to TCP and send the unblock ack back to directory.
+ action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination := tbe.Destination;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ enqueue(unblockToNB_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+
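+ // Only the first requestor (tbe.Destination.count() == 1) issues the
+ // downstream fetch; later requestors are merged into tbe.Destination
+ // and share the single response.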
+ action(rd_requestData, "r", desc="Miss in L2, pass on") {
+ if(tbe.Destination.count()==1){
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.Shared := false; // unneeded for this request
+ out_msg.MessageSize := in_msg.MessageSize;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+ }
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(responseFromNB_in, ResponseMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Destination.clear();
+ out_msg.Destination.add(in_msg.WTRequestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(swb_sendWBAck, "swb", desc="send WB Ack") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysWBAck;
+ out_msg.Destination.clear();
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
+ peek(responseFromNB_in, ResponseMsg) {
+ enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:TDSysResp;
+ out_msg.Destination.add(in_msg.WTRequestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ }
+ }
+ }
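+
+ // Send an explicit DoneAck to the region buffer (getPeer); the Dirty
+ // bit is reported from the TBE when one is valid.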
+ action(sd2rb_sendDone2RegionBuffer, "sd2rb", desc="Request finished, send done ack") {
+ enqueue(unblockToNB_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.DoneAck := true;
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else {
+ out_msg.Dirty := false;
+ }
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(a_allocateBlock, "a", desc="allocate TCC block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ cache_entry.writeMask.clear();
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ if (is_invalid(tbe)) {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.Destination.clear();
+ tbe.numAtomics := 0;
+ }
+ if (coreRequestNetwork_in.isReady(clockEdge())) {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ if(in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic){
+ tbe.Destination.add(in_msg.Requestor);
+ }
+ }
+ }
+ }
+
+ action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
+ tbe.Destination.clear();
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
+ peek(responseFromNB_in, ResponseMsg) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
+ }
+ }
+
+ action(wdb_writeDirtyBytes, "wdb", desc="write dirty bytes to TCC") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ cache_entry.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
+ cache_entry.writeMask.orMask(in_msg.writeMask);
+ DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
+ }
+ }
+
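+ // Forward the written bytes and their mask downstream as a
+ // WriteThrough request; WTRequestor records the original requestor so
+ // the eventual WBAck can be routed back to it.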
+ action(wt_writeThrough, "wt", desc="write through data") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.Requestor;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:WriteThrough;
+ out_msg.Dirty := true;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.writeMask.orMask(in_msg.writeMask);
+ }
+ }
+ }
+
+ action(wb_writeBack, "wb", desc="write back data") {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:WriteThrough;
+ out_msg.Dirty := true;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.writeMask.orMask(cache_entry.writeMask);
+ }
+ }
+
+ action(at_atomicThrough, "at", desc="forward atomic request downstream") {
+ peek(coreRequestNetwork_in, CPURequestMsg) {
+ enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.Requestor;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Data;
+ out_msg.Type := CoherenceRequestType:Atomic;
+ out_msg.Dirty := true;
+ out_msg.writeMask.orMask(in_msg.writeMask);
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseToNB_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
+ L2cache.setMRU(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ coreRequestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseFromNB_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+ action(zz_recycleRequestQueue, "z", desc="stall") {
+ coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+
+ action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
+ tbe.numAtomics := tbe.numAtomics + 1;
+ }
+
+
+ action(dna_decrementNumAtomics, "dna", desc="dec num atomics") {
+ tbe.numAtomics := tbe.numAtomics - 1;
+ if (tbe.numAtomics==0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AtomicDone;
+ }
+ }
+ }
+
+ action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ // END ACTIONS
+
+ // BEGIN TRANSITIONS
+ // Transitions from base states.
+ // Assumptions for the ArrayRead/Write resource annotations:
+ // - the TBE is checked before the tags
+ // - a data array read/write requires a tag array read
+
+ transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
+ zz_recycleRequestQueue;
+ }
+ transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) {TagArrayRead} {
+ zz_recycleRequestQueue;
+ }
+ transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
+ zz_recycleRequestQueue;
+ }
+ transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
+ sd_sendData;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+ transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ wb_writeBack;
+ }
+
+ transition(I, RdBlk, IV) {TagArrayRead} {
+ t_allocateTBE;
+ rd_requestData;
+ p_popRequestQueue;
+ }
+
+ transition(IV, RdBlk) {
+ t_allocateTBE;
+ rd_requestData;
+ p_popRequestQueue;
+ }
+
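+ // Atomics are not performed in this TCC: any local copy is
+ // invalidated and the request is forwarded downstream via
+ // at_atomicThrough.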
+ transition({V, I}, Atomic, A) {TagArrayRead} {
+ i_invL2;
+ t_allocateTBE;
+ at_atomicThrough;
+ ina_incrementNumAtomics;
+ p_popRequestQueue;
+ }
+
+ transition(A, Atomic) {
+ at_atomicThrough;
+ ina_incrementNumAtomics;
+ p_popRequestQueue;
+ }
+
+ transition({M, W}, Atomic, WI) {TagArrayRead} {
+ t_allocateTBE;
+ wb_writeBack;
+ }
+
+ // The cache block stays in the I state, which implies
+ // this TCC is a write-no-allocate cache.
+ transition(I, WrVicBlk) {TagArrayRead} {
+ wt_writeThrough;
+ p_popRequestQueue;
+ }
+
+ transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
+ ut_updateTag;
+ wdb_writeDirtyBytes;
+ wt_writeThrough;
+ p_popRequestQueue;
+ }
+
+ transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ ut_updateTag;
+ swb_sendWBAck;
+ wdb_writeDirtyBytes;
+ p_popRequestQueue;
+ }
+
+ transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ ut_updateTag;
+ swb_sendWBAck;
+ wdb_writeDirtyBytes;
+ p_popRequestQueue;
+ }
+
+ transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocateBlock;
+ ut_updateTag;
+ swb_sendWBAck;
+ wdb_writeDirtyBytes;
+ p_popRequestQueue;
+ }
+
+ transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
+ t_allocateTBE;
+ wb_writeBack;
+ i_invL2;
+ }
+
+ transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
+ i_invL2;
+ }
+
+ transition({A, IV, WI}, L2_Repl) {
+ i_invL2;
+ }
+
+ transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(W, PrbInv) {TagArrayRead} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition({A, IV, WI}, PrbInv) {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocateBlock;
+ ut_updateTag;
+ wcb_writeCacheBlock;
+ sdr_sendDataResponse;
+ sd2rb_sendDone2RegionBuffer;
+ pr_popResponseQueue;
+ dt_deallocateTBE;
+ }
+
+ transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
+ a_allocateBlock;
+ ar_sendAtomicResponse;
+ sd2rb_sendDone2RegionBuffer;
+ dna_decrementNumAtomics;
+ pr_popResponseQueue;
+ }
+
+ transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
+ dt_deallocateTBE;
+ ptr_popTriggerQueue;
+ }
+
+ transition(A, AtomicNotDone) {TagArrayRead} {
+ ptr_popTriggerQueue;
+ }
+
+ // M, W should not see WBAck as the cache is in WB mode.
+ // WBAcks do not need to check tags.
+ transition({I, V, IV, A}, WBAck) {
+ w_sendResponseWBAck;
+ sd2rb_sendDone2RegionBuffer;
+ pr_popResponseQueue;
+ }
+
+ transition(WI, WBAck, I) {
+ sd2rb_sendDone2RegionBuffer;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+}
--- /dev/null
+protocol "GPU_VIPER_Region";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_AMD_Base-msg.sm";
+include "MOESI_AMD_Base-Region-CorePair.sm";
+include "MOESI_AMD_Base-L3cache.sm";
+include "MOESI_AMD_Base-Region-dir.sm";
+include "GPU_VIPER_Region-TCC.sm";
+include "GPU_VIPER-TCP.sm";
+include "GPU_VIPER-SQC.sm";
+include "MOESI_AMD_Base-RegionDir.sm";
+include "MOESI_AMD_Base-RegionBuffer.sm";
--- /dev/null
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Authors: Brad Beckmann
+ * Tushar Krishna
+ */
+
+
+machine(MachineType:L1Cache, "Garnet_standalone L1 Cache")
+ : Sequencer * sequencer;
+ Cycles issue_latency := 2;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="0",
+ vnet_type = "request";
+ MessageBuffer * forwardFromCache, network="To", virtual_network="1",
+ vnet_type = "forward";
+ MessageBuffer * responseFromCache, network="To", virtual_network="2",
+ vnet_type = "response";
+
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ I, AccessPermission:Invalid, desc="Not Present/Invalid";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // From processor
+ Request, desc="Request from Garnet_standalone";
+ Forward, desc="Forward from Garnet_standalone";
+ Response, desc="Response from Garnet_standalone";
+ }
+
+ // STRUCTURE DEFINITIONS
+ DataBlock dummyData;
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="Data in the block";
+ }
+
+ // FUNCTIONS
+ Tick clockEdge();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // cpu/testers/networktest/networktest.cc generates packets of the type
+ // ReadReq, INST_FETCH, and WriteReq.
+ // These are converted to LD, IFETCH and ST by mem/ruby/system/RubyPort.cc.
+ // These are then sent to the sequencer, which sends them here.
+ // Garnet_standalone-cache.sm tags LD, IFETCH and ST as Request, Forward,
+ // and Response Events respectively, which are then injected into
+ // virtual networks 0, 1 and 2 respectively.
+ // This models traffic of different types within the network.
+ //
+ // Note that requests and forwards are MessageSizeType:Control,
+ // while responses are MessageSizeType:Data.
+ //
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Request;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Forward;
+ } else if (type == RubyRequestType:ST) {
+ return Event:Response;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+
+ State getState(Entry cache_entry, Addr addr) {
+ return State:I;
+ }
+
+ void setState(Entry cache_entry, Addr addr, State state) {
+
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ }
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ return OOD;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional write.");
+ }
+
+ // NETWORK PORTS
+
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(forwardNetwork_out, RequestMsg, forwardFromCache);
+ out_port(responseNetwork_out, RequestMsg, responseFromCache);
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest) {
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
+ }
+ }
+ }
+
+ // ACTIONS
+
+ // The destination directory of a packet is embedded in its address;
+ // mapAddressToMachine() is used to retrieve it.
+
+ action(a_issueRequest, "a", desc="Issue a request") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MSG;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+
+ // To send broadcasts in vnet0 (to emulate broadcast-based protocols),
+ // replace the above line by the following:
+ // out_msg.Destination := broadcast(MachineType:Directory);
+
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(b_issueForward, "b", desc="Issue a forward") {
+ enqueue(forwardNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MSG;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(c_issueResponse, "c", desc="Issue a response") {
+ enqueue(responseNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MSG;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
+ sequencer.readCallback(address, dummyData);
+ }
+
+ action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
+ sequencer.writeCallback(address, dummyData);
+ }
+
+
+ // TRANSITIONS
+
+ // The sequencer hit callback is performed after injecting the packets.
+ // The goal of the Garnet_standalone protocol is only to inject packets into
+ // the network, not to keep track of them via TBEs.
+
+ transition(I, Response) {
+ s_store_hit;
+ c_issueResponse;
+ m_popMandatoryQueue;
+ }
+
+ transition(I, Request) {
+ r_load_hit;
+ a_issueRequest;
+ m_popMandatoryQueue;
+ }
+
+ transition(I, Forward) {
+ r_load_hit;
+ b_issueForward;
+ m_popMandatoryQueue;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Authors: Brad Beckmann
+ * Tushar Krishna
+ */
+
+
+machine(MachineType:Directory, "Garnet_standalone Directory")
+ : MessageBuffer * requestToDir, network="From", virtual_network="0",
+ vnet_type = "request";
+ MessageBuffer * forwardToDir, network="From", virtual_network="1",
+ vnet_type = "forward";
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ vnet_type = "response";
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, AccessPermission:Invalid, desc="Invalid";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ // processor requests
+ Receive_Request, desc="Receive Message";
+ Receive_Forward, desc="Receive Message";
+ Receive_Response, desc="Receive Message";
+ }
+
+ // TYPES
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // ** FUNCTIONS **
+ Tick clockEdge();
+
+ State getState(Addr addr) {
+ return State:I;
+ }
+
+ void setState(Addr addr, State state) {
+
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("Garnet_standalone does not support functional write.");
+ }
+
+ // ** IN_PORTS **
+
+ in_port(requestQueue_in, RequestMsg, requestToDir) {
+ if (requestQueue_in.isReady(clockEdge())) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:MSG) {
+ trigger(Event:Receive_Request, in_msg.addr);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+ in_port(forwardQueue_in, RequestMsg, forwardToDir) {
+ if (forwardQueue_in.isReady(clockEdge())) {
+ peek(forwardQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:MSG) {
+ trigger(Event:Receive_Forward, in_msg.addr);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+ in_port(responseQueue_in, RequestMsg, responseToDir) {
+ if (responseQueue_in.isReady(clockEdge())) {
+ peek(responseQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:MSG) {
+ trigger(Event:Receive_Response, in_msg.addr);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue(clockEdge());
+ }
+
+ action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
+ forwardQueue_in.dequeue(clockEdge());
+ }
+
+ action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
+ responseQueue_in.dequeue(clockEdge());
+ }
+
+ // TRANSITIONS
+
+ // The directory simply drops the received packets.
+ // The goal of Garnet_standalone is only to track network stats.
+
+ transition(I, Receive_Request) {
+ i_popIncomingRequestQueue;
+ }
+ transition(I, Receive_Forward) {
+ f_popIncomingForwardQueue;
+ }
+ transition(I, Receive_Response) {
+ r_popIncomingResponseQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ MSG, desc="Message";
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ error("Garnet_standalone does not support functional accesses!");
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ error("Garnet_standalone does not support functional accesses!");
+ }
+}
--- /dev/null
+protocol "Garnet_standalone";
+include "RubySlicc_interfaces.slicc";
+include "Garnet_standalone-msg.sm";
+include "Garnet_standalone-cache.sm";
+include "Garnet_standalone-dir.sm";
--- /dev/null
+/*
+ * Copyright (c) 2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L0Cache, "MESI Directory L0 Cache")
+ : Sequencer * sequencer;
+ CacheMemory * Icache;
+ CacheMemory * Dcache;
+ Cycles request_latency := 2;
+ Cycles response_latency := 2;
+ bool send_evictions;
+
+ // From this node's L0 cache to the network
+ MessageBuffer * bufferToL1, network="To";
+
+ // To this node's L0 cache FROM the network
+ MessageBuffer * bufferFromL1, network="From";
+
+ // Message queue between this controller and the processor
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L0Cache_State_I") {
+ // Base states
+
+ // The cache entry has not been allocated.
+ I, AccessPermission:Invalid;
+
+ // The cache entry is in shared mode. The processor can read this entry
+ // but it cannot write to it.
+ S, AccessPermission:Read_Only;
+
+ // The cache entry is in exclusive mode. The processor can read this
+ // entry. It can write to this entry without informing the directory.
+ // On writing, the entry moves to M state.
+ E, AccessPermission:Read_Only;
+
+ // The processor has read and write permissions on this entry.
+ M, AccessPermission:Read_Write;
+
+ // Transient States
+
+ // The cache controller has requested an instruction. It will be stored
+ // in the shared state so that the processor can read it.
+ Inst_IS, AccessPermission:Busy;
+
+ // The cache controller has requested that this entry be fetched in
+ // shared state so that the processor can read it.
+ IS, AccessPermission:Busy;
+
+ // The cache controller has requested that this entry be fetched in
+ // modify state so that the processor can read/write it.
+ IM, AccessPermission:Busy;
+
+ // The cache controller had read permission over the entry. But now the
+ // processor needs to write to it. So, the controller has requested for
+ // write permission.
+ SM, AccessPermission:Read_Only;
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // L0 events
+ Load, desc="Load request from the home processor";
+ Ifetch, desc="I-fetch request from the home processor";
+ Store, desc="Store request from the home processor";
+
+ Inv, desc="Invalidate request from L2 bank";
+
+ // internal generated request
+ L0_Replacement, desc="L0 Replacement", format="!r";
+
+ // other requests
+ Fwd_GETX, desc="GETX from other processor";
+ Fwd_GETS, desc="GETS from other processor";
+ Fwd_GET_INSTR, desc="GET_INSTR from other processor";
+
+ Data, desc="Data for processor";
+ Data_Exclusive, desc="Data for processor";
+ Data_Stale, desc="Data for processor, but not for storage";
+
+ Ack, desc="Ack for processor";
+ Ack_all, desc="Last ack for processor";
+
+ WB_Ack, desc="Ack for replacement";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="data is dirty";
+ int pendingAcks, default="0", desc="number of pending acks";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ void set_cache_entry(AbstractCacheEntry a);
+ void unset_cache_entry();
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpBuffers(Addr a);
+ void wakeUpAllBuffers(Addr a);
+ void profileMsgDelay(int virtualNetworkType, Cycles c);
+
+ // inclusive cache returns L0 entries only
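+ // A block is never resident in both the I- and D-cache at once
+ // (asserted in getState/setState), so the D-cache is checked first
+ // and the I-cache entry is returned otherwise.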
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
+ if(is_valid(Dcache_entry)) {
+ return Dcache_entry;
+ }
+
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
+ return Icache_entry;
+ }
+
+ Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
+ return Dcache_entry;
+ }
+
+ Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
+ return Icache_entry;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);
+
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);
+
+ // MUST CHANGE
+ if(is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
+ return L0Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(cache_entry.CacheState));
+ return L0Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L0Cache_State_to_permission(state));
+ }
+ }
+
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ int getPendingAcks(TBE tbe) {
+ return tbe.pendingAcks;
+ }
+
+ out_port(requestNetwork_out, CoherenceMsg, bufferToL1);
+
+ // Messages for this L0 cache from the L1 cache
+ in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
+ if (messgeBuffer_in.isReady(clockEdge())) {
+ peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
+ assert(in_msg.Dest == machineID);
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
+ trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
+ } else if(in_msg.Class == CoherenceClass:DATA) {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ } else if(in_msg.Class == CoherenceClass:STALE_DATA) {
+ trigger(Event:Data_Stale, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:ACK) {
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:WB_ACK) {
+ trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:INV) {
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:GETX ||
+ in_msg.Class == CoherenceClass:UPGRADE) {
+ // upgrade transforms to GETX due to race
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:GETS) {
+ trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
+ trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
+
+ // Mandatory queue between the node's CPU and its L0 caches
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+ // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
+ if (is_valid(Icache_entry)) {
+ // The tag matches in the L0 I-cache, so handle the request here.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ Icache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+
+ // Check to see if it is in the OTHER L0
+ Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
+ if (is_valid(Dcache_entry)) {
+ // The block is in the wrong L0, put the request on the queue to the private L1
+ trigger(Event:L0_Replacement, in_msg.LineAddress,
+ Dcache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ if (Icache.cacheAvail(in_msg.LineAddress)) {
+ // L0 doesn't have the line, but we have space for it
+ // in the L0, so let's see if the L1 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ Icache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+ // No room in the L0, so we need to make room in the L0
+ // Check if the line we want to evict is not locked
+ Addr addr := Icache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+ trigger(Event:L0_Replacement, addr,
+ getICacheEntry(addr),
+ TBEs[addr]);
+ }
+ }
+ } else {
+
+ // *** DATA ACCESS ***
+ Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
+ if (is_valid(Dcache_entry)) {
+ // The tag matches in the L0 D-cache, so handle the request here.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ Dcache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+
+ // Check to see if it is in the OTHER L0
+ Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
+ if (is_valid(Icache_entry)) {
+ // The block is in the wrong L0, put the request on the queue to the private L1
+ trigger(Event:L0_Replacement, in_msg.LineAddress,
+ Icache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ if (Dcache.cacheAvail(in_msg.LineAddress)) {
+ // L0 doesn't have the line, but we have space for it
+ // in the L0, so let's see if the L1 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ Dcache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+ // No room in the L0, so we need to make room in the L0
+ // Check if the line we want to evict is not locked
+ Addr addr := Dcache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+ trigger(Event:L0_Replacement, addr,
+ getDCacheEntry(addr),
+ TBEs[addr]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:GETS;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L1Cache, version);
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Dest);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:GETX;
+ out_msg.Sender := machineID;
+ DPRINTF(RubySlicc, "%s\n", machineID);
+ out_msg.Dest := createMachineID(MachineType:L1Cache, version);
+
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Dest);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:UPGRADE;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L1Cache, version);
+
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Dest);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(f_sendDataToL1, "f", desc="send data to the L1 cache") {
+ enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:INV_DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L1Cache, version);
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ cache_entry.Dirty := false;
+ }
+
+ action(fi_sendInvAck, "fi", desc="send invalidate ack to the L1 cache") {
+ peek(messgeBuffer_in, CoherenceMsg) {
+ enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:INV_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L1Cache, version);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
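+ // send_evictions is typically enabled for cores that need eviction
+ // callbacks to squash speculatively executed loads (e.g., to preserve
+ // x86 load ordering).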
+ action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(g_issuePUTX, "g", desc="send PUTX to the L1 cache") {
+ enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:PUTX;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender:= machineID;
+ out_msg.Dest := createMachineID(MachineType:L1Cache, version);
+
+ if (cache_entry.Dirty) {
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ Dcache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk);
+ }
+
+ action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ Icache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk);
+ }
+
+ action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ Dcache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, true);
+ }
+
+ action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ Icache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, true);
+ }
+
+ action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ Dcache.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk);
+ cache_entry.Dirty := true;
+ }
+
+ action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ Dcache.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk, true);
+ cache_entry.Dirty := true;
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.DataBlk := cache_entry.DataBlk;
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(l_popRequestQueue, "l",
+ desc="Pop incoming request queue and profile the delay within this virtual network") {
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
+ }
+
+ action(o_popIncomingResponseQueue, "o",
+ desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(messgeBuffer_in, CoherenceMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+  action(u_writeInstToCache, "ui", desc="Write instruction data to cache") {
+ peek(messgeBuffer_in, CoherenceMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(ff_deallocateCacheBlock, "\f",
+ desc="Deallocate L1 cache block.") {
+ if (Dcache.isTagPresent(address)) {
+ Dcache.deallocate(address);
+ } else {
+ Icache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(oo_allocateDCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(Dcache.allocate(address, new Entry));
+ }
+ }
+
+ action(pp_allocateICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(Icache.allocate(address, new Entry));
+ }
+ }
+
+  action(z_stallAndWaitMandatoryQueue, "\z", desc="stall and wait the CPU request queue") {
+ stall_and_wait(mandatoryQueue_in, address);
+ }
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpAllBuffers(address);
+ }
+
+ action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
+ ++Icache.demand_misses;
+ }
+
+  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++Icache.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
+ ++Dcache.demand_misses;
+ }
+
+  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++Dcache.demand_hits;
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
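+  // Each transition reads: transition(current state(s), event(s)
+  // [, next state]) { ordered actions }; omitting the next state keeps
+  // the machine in its current state.
+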
+  // Transitions for Load/Ifetch/Store/Replacement from transient states
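+  // stall_and_wait() parks the offending message on a per-address wait
+  // list; the kd_wakeUpDependents (wakeUpAllBuffers) calls in the
+  // completion transitions below replay it once the block leaves its
+  // transient state.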
+ transition({Inst_IS, IS, IM, SM}, {Load, Ifetch, Store, L0_Replacement}) {
+ z_stallAndWaitMandatoryQueue;
+ }
+
+ // Transitions from Idle
+ transition(I, Load, IS) {
+ oo_allocateDCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch, Inst_IS) {
+ pp_allocateICacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileInstMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store, IM) {
+ oo_allocateDCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({I, IS, IM, Inst_IS}, Inv) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
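+  // An Inv that races with an upgrade demotes SM to IM: the shared copy
+  // is gone, so the eventual response must supply data, not just acks.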
+ transition(SM, Inv, IM) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition({S,E,M}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S,E,M}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ c_issueUPGRADE;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L0_Replacement, I) {
+ forward_eviction_to_cpu;
+ ff_deallocateCacheBlock;
+ }
+
+ transition(S, Inv, I) {
+ forward_eviction_to_cpu;
+ fi_sendInvAck;
+ ff_deallocateCacheBlock;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Exclusive
+ transition({E,M}, Store, M) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(E, L0_Replacement, I) {
+ forward_eviction_to_cpu;
+ g_issuePUTX;
+ ff_deallocateCacheBlock;
+ }
+
+ transition(E, {Inv, Fwd_GETX}, I) {
+ // don't send data
+ forward_eviction_to_cpu;
+ fi_sendInvAck;
+ ff_deallocateCacheBlock;
+ l_popRequestQueue;
+ }
+
+ transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ f_sendDataToL1;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Modified
+ transition(M, L0_Replacement, I) {
+ forward_eviction_to_cpu;
+ g_issuePUTX;
+ ff_deallocateCacheBlock;
+ }
+
+ transition(M, {Inv, Fwd_GETX}, I) {
+ forward_eviction_to_cpu;
+ f_sendDataToL1;
+ ff_deallocateCacheBlock;
+ l_popRequestQueue;
+ }
+
+ transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ f_sendDataToL1;
+ l_popRequestQueue;
+ }
+
+ transition(IS, Data, S) {
+ u_writeDataToCache;
+ hx_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Data_Exclusive, E) {
+ u_writeDataToCache;
+ hx_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Data_Stale, I) {
+ u_writeDataToCache;
+ hx_load_hit;
+ s_deallocateTBE;
+ ff_deallocateCacheBlock;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(Inst_IS, Data, S) {
+ u_writeInstToCache;
+ hx_ifetch_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(Inst_IS, Data_Exclusive, E) {
+ u_writeInstToCache;
+ hx_ifetch_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(Inst_IS, Data_Stale, I) {
+ u_writeInstToCache;
+ hx_ifetch_hit;
+ s_deallocateTBE;
+ ff_deallocateCacheBlock;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({IM,SM}, Data_Exclusive, M) {
+ u_writeDataToCache;
+ hhx_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
+ : CacheMemory * cache;
+ int l2_select_num_bits;
+ Cycles l1_request_latency := 2;
+ Cycles l1_response_latency := 2;
+ Cycles to_l2_latency := 1;
+
+ // Message Buffers between the L1 and the L0 Cache
+ // From the L1 cache to the L0 cache
+ MessageBuffer * bufferToL0, network="To";
+
+ // From the L0 cache to the L1 cache
+ MessageBuffer * bufferFromL0, network="From";
+
+ // Message queue from this L1 cache TO the network / L2
+ MessageBuffer * requestToL2, network="To", virtual_network="0",
+ vnet_type="request";
+
+ MessageBuffer * responseToL2, network="To", virtual_network="1",
+ vnet_type="response";
+ MessageBuffer * unblockToL2, network="To", virtual_network="2",
+ vnet_type="unblock";
+
+ // To this L1 cache FROM the network / L2
+ MessageBuffer * requestFromL2, network="From", virtual_network="2",
+ vnet_type="request";
+ MessageBuffer * responseFromL2, network="From", virtual_network="1",
+ vnet_type="response";
+
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+    I, AccessPermission:Invalid, desc="an L1 cache entry Idle";
+    S, AccessPermission:Read_Only, desc="an L1 cache entry Shared; the block is also present in the L0 cache";
+    SS, AccessPermission:Read_Only, desc="an L1 cache entry Shared; the block is present only in this L1";
+    E, AccessPermission:Read_Only, desc="an L1 cache entry Exclusive; the block is also present in the L0 cache";
+    EE, AccessPermission:Read_Write, desc="an L1 cache entry Exclusive; the block is present only in this L1";
+    M, AccessPermission:Maybe_Stale, desc="an L1 cache entry Modified; the block is also present in the L0 cache", format="!b";
+    MM, AccessPermission:Read_Write, desc="an L1 cache entry Modified; the block is present only in this L1", format="!b";
+
+ // Transient States
+ IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
+ IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
+    SM, AccessPermission:Read_Only, desc="L1 shared, issued UPGRADE, have not seen response yet";
+ IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
+ M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
+ SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
+
+    // For all of the following states, an invalidate
+    // message has been sent to the L0 cache and the
+    // response has not yet been seen.
+ S_IL0, AccessPermission:Busy;
+ E_IL0, AccessPermission:Busy;
+ M_IL0, AccessPermission:Busy;
+ MM_IL0, AccessPermission:Read_Write;
+ SM_IL0, AccessPermission:Busy;
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // Requests from the L0 cache
+ Load, desc="Load request";
+ Store, desc="Store request";
+ WriteBack, desc="Writeback request";
+
+ // Responses from the L0 Cache
+ // L0 cache received the invalidation message
+ // and has sent the data.
+ L0_DataAck;
+
+ Inv, desc="Invalidate request from L2 bank";
+
+ // internal generated request
+ // Invalidate the line in L0 due to own requirements
+ L0_Invalidate_Own;
+ // Invalidate the line in L0 due to some other cache's requirements
+ L0_Invalidate_Else;
+    // Invalidate the line in this cache due to another cache's needs or space pressure.
+ L1_Replacement;
+
+ // other requests
+ Fwd_GETX, desc="GETX from other processor";
+ Fwd_GETS, desc="GETS from other processor";
+
+ Data, desc="Data for processor";
+ Data_Exclusive, desc="Data for processor";
+ DataS_fromL1, desc="data for GETS request, need to unblock directory";
+ Data_all_Acks, desc="Data for processor, all acks";
+
+ L0_Ack, desc="Ack for processor";
+ Ack, desc="Ack for processor";
+ Ack_all, desc="Last ack for processor";
+
+ WB_Ack, desc="Ack for replacement";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="data is dirty";
+ int pendingAcks, default="0", desc="number of pending acks";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+
+ int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
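+  // mapAddressToRange() below picks the destination L2 bank out of the
+  // address bits [l2_select_low_bit, l2_select_low_bit +
+  // l2_select_num_bits). Illustration (numbers assumed, not taken from
+  // this file): with 64-byte blocks (low bit 6) and l2_select_num_bits
+  // = 2, address bits 7:6 choose among four interleaved L2 banks.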
+
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ void set_cache_entry(AbstractCacheEntry a);
+ void unset_cache_entry();
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpBuffers(Addr a);
+ void wakeUpAllBuffers(Addr a);
+ void profileMsgDelay(int virtualNetworkType, Cycles c);
+
+ // inclusive cache returns L1 entries only
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
+ return cache_entry;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ // MUST CHANGE
+ if(is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
+ }
+ }
+
+ Event mandatory_request_type_to_event(CoherenceClass type) {
+ if (type == CoherenceClass:GETS) {
+ return Event:Load;
+ } else if ((type == CoherenceClass:GETX) ||
+ (type == CoherenceClass:UPGRADE)) {
+ return Event:Store;
+ } else if (type == CoherenceClass:PUTX) {
+ return Event:WriteBack;
+ } else {
+ error("Invalid RequestType");
+ }
+ }
+
+ int getPendingAcks(TBE tbe) {
+ return tbe.pendingAcks;
+ }
+
+ bool inL0Cache(State state) {
+ if (state == State:S || state == State:E || state == State:M ||
+ state == State:S_IL0 || state == State:E_IL0 ||
+ state == State:M_IL0 || state == State:SM_IL0) {
+ return true;
+ }
+
+ return false;
+ }
+
+ out_port(requestNetwork_out, RequestMsg, requestToL2);
+ out_port(responseNetwork_out, ResponseMsg, responseToL2);
+ out_port(unblockNetwork_out, ResponseMsg, unblockToL2);
+ out_port(bufferToL0_out, CoherenceMsg, bufferToL0);
+
+ // Response From the L2 Cache to this L1 cache
+ in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
+ } else if(in_msg.Type == CoherenceResponseType:DATA) {
+ if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
+ getState(tbe, cache_entry, in_msg.addr) == State:IS_I) &&
+ machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+
+ trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);
+
+ } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
+ trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
+ trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+ trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Invalid L1 response type");
+ }
+ }
+ }
+ }
+
+ // Request to this L1 cache from the shared L2
+ in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
+ if(requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if (in_msg.Type == CoherenceRequestType:INV) {
+ if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
+ trigger(Event:L0_Invalidate_Else, in_msg.addr,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETX ||
+ in_msg.Type == CoherenceRequestType:UPGRADE) {
+ if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
+ trigger(Event:L0_Invalidate_Else, in_msg.addr,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
+ trigger(Event:L0_Invalidate_Else, in_msg.addr,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
+
+ // Requests to this L1 cache from the L0 cache.
+ in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
+ if (messageBufferFromL0_in.isReady(clockEdge())) {
+ peek(messageBufferFromL0_in, CoherenceMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if(in_msg.Class == CoherenceClass:INV_DATA) {
+ trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Class == CoherenceClass:INV_ACK) {
+ trigger(Event:L0_Ack, in_msg.addr, cache_entry, tbe);
+ } else {
+ if (is_valid(cache_entry)) {
+ trigger(mandatory_request_type_to_event(in_msg.Class),
+ in_msg.addr, cache_entry, tbe);
+ } else {
+ if (cache.cacheAvail(in_msg.addr)) {
+              // L1 doesn't have the line, but we have space for it
+              // in the L1, so let's see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Class),
+ in_msg.addr, cache_entry, tbe);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ Entry victim_entry :=
+ getCacheEntry(cache.cacheProbe(in_msg.addr));
+ TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
+
+ if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
+ trigger(Event:L0_Invalidate_Own,
+ cache.cacheProbe(in_msg.addr),
+ victim_entry, victim_tbe);
+ } else {
+ trigger(Event:L1_Replacement,
+ cache.cacheProbe(in_msg.addr),
+ victim_entry, victim_tbe);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ peek(messageBufferFromL0_in, CoherenceMsg) {
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ peek(messageBufferFromL0_in, CoherenceMsg) {
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ DPRINTF(RubySlicc, "%s\n", machineID);
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
+ peek(messageBufferFromL0_in, CoherenceMsg) {
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:UPGRADE;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(d_sendDataToRequestor, "d", desc="send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ }
+
+ action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ }
+
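+  // The explicit AckCount := 1 below is what the requesting L1 subtracts
+  // from tbe.pendingAcks in q_updateAckCount; Ack_all fires once the
+  // count reaches zero.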
+  action(fi_sendInvAck, "fi", desc="send invalidate ack to the requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.AckCount := 1;
+ }
+ }
+ }
+
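+  // The L0 answers this INV with INV_DATA (dirty) or INV_ACK (clean);
+  // those arrive through messageBufferFromL0_in as L0_DataAck / L0_Ack
+  // and release the *_IL0 transient states.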
+  action(forward_eviction_to_L0, "\cc", desc="sends an invalidation to the L0 cache") {
+ enqueue(bufferToL0_out, CoherenceMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:INV;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L0Cache, version);
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(g_issuePUTX, "g", desc="send data to the L2 cache") {
+ enqueue(requestNetwork_out, RequestMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.Dirty := cache_entry.Dirty;
+      out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ if (cache_entry.Dirty) {
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
+ enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%#x\n", address);
+ }
+ }
+
+ action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
+ enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, clusterID));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%#x\n", address);
+
+ }
+ }
+
+ action(h_data_to_l0, "h", desc="If not prefetch, send data to the L0 cache.") {
+ enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L0Cache, version);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+  action(hh_xdata_to_l0, "\h", desc="send exclusive data to the L0 cache.") {
+ enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L0Cache, version);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+
+ //cache_entry.Dirty := true;
+ }
+ }
+
+  action(h_stale_data_to_l0, "hs", desc="send stale data to the L0 cache.") {
+ enqueue(bufferToL0_out, CoherenceMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+
+ out_msg.addr := address;
+ out_msg.Class := CoherenceClass:STALE_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Dest := createMachineID(MachineType:L0Cache, version);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.DataBlk := cache_entry.DataBlk;
+ }
+
+  action(k_popL0RequestQueue, "k", desc="Pop the queue of requests from the L0 cache.") {
+ messageBufferFromL0_in.dequeue(clockEdge());
+ }
+
+ action(l_popL2RequestQueue, "l",
+ desc="Pop incoming request queue and profile the delay within this virtual network") {
+ Tick delay := requestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
+ }
+
+ action(o_popL2ResponseQueue, "o",
+ desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(u_writeDataFromL0Request, "ureql0", desc="Write data to cache") {
+ peek(messageBufferFromL0_in, CoherenceMsg) {
+ assert(is_valid(cache_entry));
+ if (in_msg.Dirty) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(u_writeDataFromL2Response, "uresl2", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(u_writeDataFromL0Response, "uresl0", desc="Write data to cache") {
+ peek(messageBufferFromL0_in, CoherenceMsg) {
+ assert(is_valid(cache_entry));
+ if (in_msg.Dirty) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
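+  // Worked example (the L2-side convention is assumed, not shown here):
+  // a GETX answered with Data carrying AckCount = -2 leaves pendingAcks
+  // = 2; two invalidation acks of AckCount = 1 each bring it to 0, and
+  // the (pendingAcks - AckCount) == 0 test in the response in_port then
+  // fires Ack_all instead of Ack.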
+ action(q_updateAckCount, "q", desc="Update ack count") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
+ }
+ }
+
+ action(ff_deallocateCacheBlock, "\f",
+ desc="Deallocate L1 cache block.") {
+ if (cache.isTagPresent(address)) {
+ cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(oo_allocateCacheBlock, "\o", desc="Set cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(cache.allocate(address, new Entry));
+ }
+ }
+
+  action(z0_stallAndWaitL0Queue, "\z0", desc="stall and wait the L0 request queue") {
+ stall_and_wait(messageBufferFromL0_in, address);
+ }
+
+  action(z2_stallAndWaitL2Queue, "\z2", desc="stall and wait the L2 request queue") {
+ stall_and_wait(requestNetwork_in, address);
+ }
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpAllBuffers(address);
+ }
+
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++cache.demand_misses;
+ }
+
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++cache.demand_hits;
+ }
+
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+  // Transitions for Load/Store/Replacement from transient states
+ transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, S_IL0, M_IL0, E_IL0, MM_IL0},
+ {Load, Store, L1_Replacement}) {
+ z0_stallAndWaitL0Queue;
+ }
+
+ transition(I, Load, IS) {
+ oo_allocateCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileMiss;
+ k_popL0RequestQueue;
+ }
+
+ transition(I, Store, IM) {
+ oo_allocateCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileMiss;
+ k_popL0RequestQueue;
+ }
+
+ transition(I, Inv) {
+ fi_sendInvAck;
+ l_popL2RequestQueue;
+ }
+
+ // Transitions from Shared
+ transition({S,SS}, Load, S) {
+ h_data_to_l0;
+ uu_profileHit;
+ k_popL0RequestQueue;
+ }
+
+ transition(EE, Load, E) {
+ hh_xdata_to_l0;
+ uu_profileHit;
+ k_popL0RequestQueue;
+ }
+
+ transition(MM, Load, M) {
+ hh_xdata_to_l0;
+ uu_profileHit;
+ k_popL0RequestQueue;
+ }
+
+ transition({S,SS}, Store, SM) {
+ i_allocateTBE;
+ c_issueUPGRADE;
+ uu_profileMiss;
+ k_popL0RequestQueue;
+ }
+
+ transition(SS, L1_Replacement, I) {
+ ff_deallocateCacheBlock;
+ }
+
+ transition(S, {L0_Invalidate_Own, L0_Invalidate_Else}, S_IL0) {
+ forward_eviction_to_L0;
+ }
+
+ transition(SS, Inv, I) {
+ fi_sendInvAck;
+ ff_deallocateCacheBlock;
+ l_popL2RequestQueue;
+ }
+
+ // Transitions from Exclusive
+
+ transition({EE,MM}, Store, M) {
+ hh_xdata_to_l0;
+ uu_profileHit;
+ k_popL0RequestQueue;
+ }
+
+ transition(EE, L1_Replacement, M_I) {
+    // A clean-exclusive replacement could be silent, but we issue a PUTX anyway.
+ i_allocateTBE;
+    g_issuePUTX; // send data, but keep a copy in the TBE in case of a forwarded request
+ ff_deallocateCacheBlock;
+ }
+
+ transition(EE, Inv, I) {
+ // don't send data
+ fi_sendInvAck;
+ ff_deallocateCacheBlock;
+ l_popL2RequestQueue;
+ }
+
+ transition(EE, Fwd_GETX, I) {
+ d_sendDataToRequestor;
+ ff_deallocateCacheBlock;
+ l_popL2RequestQueue;
+ }
+
+ transition(EE, Fwd_GETS, SS) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popL2RequestQueue;
+ }
+
+ transition(E, {L0_Invalidate_Own, L0_Invalidate_Else}, E_IL0) {
+ forward_eviction_to_L0;
+ }
+
+ // Transitions from Modified
+ transition(MM, L1_Replacement, M_I) {
+ i_allocateTBE;
+    g_issuePUTX; // send data, but keep a copy in the TBE in case of a forwarded request
+ ff_deallocateCacheBlock;
+ }
+
+ transition({M,E}, WriteBack, MM) {
+ u_writeDataFromL0Request;
+ k_popL0RequestQueue;
+ }
+
+ transition(M_I, WB_Ack, I) {
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ ff_deallocateCacheBlock;
+ kd_wakeUpDependents;
+ }
+
+ transition(MM, Inv, I) {
+ f_sendDataToL2;
+ ff_deallocateCacheBlock;
+ l_popL2RequestQueue;
+ }
+
+ transition(M_I, Inv, SINK_WB_ACK) {
+ ft_sendDataToL2_fromTBE;
+ l_popL2RequestQueue;
+ }
+
+ transition(MM, Fwd_GETX, I) {
+ d_sendDataToRequestor;
+ ff_deallocateCacheBlock;
+ l_popL2RequestQueue;
+ }
+
+ transition(MM, Fwd_GETS, SS) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popL2RequestQueue;
+ }
+
+ transition(M, {L0_Invalidate_Own, L0_Invalidate_Else}, M_IL0) {
+ forward_eviction_to_L0;
+ }
+
+ transition(M_I, Fwd_GETX, SINK_WB_ACK) {
+ dt_sendDataToRequestor_fromTBE;
+ l_popL2RequestQueue;
+ }
+
+ transition(M_I, Fwd_GETS, SINK_WB_ACK) {
+ dt_sendDataToRequestor_fromTBE;
+ d2t_sendDataToL2_fromTBE;
+ l_popL2RequestQueue;
+ }
+
+ // Transitions from IS
+ transition({IS,IS_I}, Inv, IS_I) {
+ fi_sendInvAck;
+ l_popL2RequestQueue;
+ }
+
+ transition(IS, Data_all_Acks, S) {
+ u_writeDataFromL2Response;
+ h_data_to_l0;
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS_I, Data_all_Acks, I) {
+ u_writeDataFromL2Response;
+ h_stale_data_to_l0;
+ s_deallocateTBE;
+ ff_deallocateCacheBlock;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, DataS_fromL1, S) {
+ u_writeDataFromL2Response;
+ j_sendUnblock;
+ h_data_to_l0;
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS_I, DataS_fromL1, I) {
+ u_writeDataFromL2Response;
+ j_sendUnblock;
+ h_stale_data_to_l0;
+ s_deallocateTBE;
+ ff_deallocateCacheBlock;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // directory is blocked when sending exclusive data
+ transition({IS,IS_I}, Data_Exclusive, E) {
+ u_writeDataFromL2Response;
+ hh_xdata_to_l0;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from IM
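+  // As in the L0 controller, an Inv racing with an upgrade demotes SM
+  // to IM so the eventual response is treated as data, not acks alone.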
+ transition({IM,SM}, Inv, IM) {
+ fi_sendInvAck;
+ l_popL2RequestQueue;
+ }
+
+ transition(IM, Data, SM) {
+ u_writeDataFromL2Response;
+ q_updateAckCount;
+ o_popL2ResponseQueue;
+ }
+
+ transition(IM, Data_all_Acks, M) {
+ u_writeDataFromL2Response;
+ hh_xdata_to_l0;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({SM, IM}, Ack) {
+ q_updateAckCount;
+ o_popL2ResponseQueue;
+ }
+
+ transition(SM, Ack_all, M) {
+ jj_sendExclusiveUnblock;
+ hh_xdata_to_l0;
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(SM, L0_Invalidate_Else, SM_IL0) {
+ forward_eviction_to_L0;
+ }
+
+  transition(SINK_WB_ACK, Inv) {
+ fi_sendInvAck;
+ l_popL2RequestQueue;
+ }
+
+  transition(SINK_WB_ACK, WB_Ack, I) {
+ s_deallocateTBE;
+ o_popL2ResponseQueue;
+ ff_deallocateCacheBlock;
+ kd_wakeUpDependents;
+ }
+
+ transition({M_IL0, E_IL0}, WriteBack, MM_IL0) {
+ u_writeDataFromL0Request;
+ k_popL0RequestQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({M_IL0, E_IL0}, L0_DataAck, MM) {
+ u_writeDataFromL0Response;
+ k_popL0RequestQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({M_IL0, MM_IL0}, L0_Ack, MM) {
+ k_popL0RequestQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(E_IL0, L0_Ack, EE) {
+ k_popL0RequestQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(S_IL0, L0_Ack, SS) {
+ k_popL0RequestQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(SM_IL0, L0_Ack, IM) {
+ k_popL0RequestQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({S_IL0, M_IL0, E_IL0, SM_IL0, SM}, L0_Invalidate_Own) {
+ z0_stallAndWaitL0Queue;
+ }
+
+ transition({S_IL0, M_IL0, E_IL0, SM_IL0}, L0_Invalidate_Else) {
+ z2_stallAndWaitL2Queue;
+ }
+
+ transition({S_IL0, M_IL0, E_IL0, MM_IL0}, {Inv, Fwd_GETX, Fwd_GETS}) {
+ z2_stallAndWaitL2Queue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Various classes of messages that can be exchanged between the L0 and the L1
+// controllers.
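+//
+// Typical exchange: an L0 store miss sends GETX up; the L1 answers with
+// DATA_EXCLUSIVE once it holds the line in E or M; an L1-initiated INV
+// is answered with INV_DATA (dirty) or INV_ACK (clean); STALE_DATA lets
+// the L0 satisfy its one outstanding access without caching the block.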
+enumeration(CoherenceClass, desc="...") {
+ GETX, desc="Get eXclusive";
+ UPGRADE, desc="UPGRADE to exclusive";
+ GETS, desc="Get Shared";
+ GET_INSTR, desc="Get Instruction";
+ INV, desc="INValidate";
+ PUTX, desc="Replacement message";
+
+ WB_ACK, desc="Writeback ack";
+
+ // Request types for sending data and acks from L0 to L1 cache
+ // when an invalidation message is received.
+ INV_DATA;
+ INV_ACK;
+
+ DATA, desc="Data block for L1 cache in S state";
+ DATA_EXCLUSIVE, desc="Data block for L1 cache in M/E state";
+ ACK, desc="Generic invalidate ack";
+
+ // This is a special case in which the L1 cache lost permissions to the
+ // shared block before it got the data. So the L0 cache can use the data
+ // but not store it.
+ STALE_DATA;
+}
+
+// Class for messages sent between the L0 and the L1 controllers.
+structure(CoherenceMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address of the cache block";
+ CoherenceClass Class, desc="Type of message (GetS, GetX, PutX, etc)";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ MachineID Sender, desc="What component sent this message";
+ MachineID Dest, desc="What machine receives this message";
+ MessageSizeType MessageSize, desc="size category of the message";
+ DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+ bool Dirty, default="false", desc="Dirty bit";
+
+ bool functionalRead(Packet *pkt) {
+    // Only PUTX messages contain the data block
+ if (Class == CoherenceClass:PUTX) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+    // No check on the message type is required: functionally writing
+    // the block into any message that carries it is harmless.
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
--- /dev/null
+protocol "MESI_Three_Level";
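+// Only the L0/L1 controllers and the L0<->L1 message definitions are
+// new in the three-level build; the L2, directory, and DMA machines are
+// reused from MESI_Two_Level.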
+include "RubySlicc_interfaces.slicc";
+include "MESI_Two_Level-msg.sm";
+include "MESI_Three_Level-msg.sm";
+include "MESI_Three_Level-L0cache.sm";
+include "MESI_Three_Level-L1cache.sm";
+include "MESI_Two_Level-L2cache.sm";
+include "MESI_Two_Level-dir.sm";
+include "MESI_Two_Level-dma.sm";
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
+ : Sequencer * sequencer;
+ CacheMemory * L1Icache;
+ CacheMemory * L1Dcache;
+ Prefetcher * prefetcher;
+ int l2_select_num_bits;
+ Cycles l1_request_latency := 2;
+ Cycles l1_response_latency := 2;
+ Cycles to_l2_latency := 1;
+ bool send_evictions;
+ bool enable_prefetch := "False";
+
+ // Message Queues
+ // From this node's L1 cache TO the network
+
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+ vnet_type="request";
+
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
+ vnet_type="response";
+
+ MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
+ vnet_type="unblock";
+
+
+ // To this node's L1 cache FROM the network
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
+ vnet_type="request";
+
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
+ vnet_type="response";
+
+ // Request Buffer for prefetches
+ MessageBuffer * optionalQueue;
+
+ // Buffer for requests generated by the processor core.
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ NP, AccessPermission:Invalid, desc="Not present in either cache";
+    I, AccessPermission:Invalid, desc="an L1 cache entry Idle";
+    S, AccessPermission:Read_Only, desc="an L1 cache entry Shared";
+    E, AccessPermission:Read_Only, desc="an L1 cache entry Exclusive";
+    M, AccessPermission:Read_Write, desc="an L1 cache entry Modified", format="!b";
+
+ // Transient States
+ IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
+ IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
+    SM, AccessPermission:Read_Only, desc="L1 shared, issued GETX, have not seen response yet";
+ IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
+
+ M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
+ SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
+
+ // Transient States in which block is being prefetched
+ PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
+ PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
+ PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
+    PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // L1 events
+ Load, desc="Load request from the home processor";
+ Ifetch, desc="I-fetch request from the home processor";
+ Store, desc="Store request from the home processor";
+
+ Inv, desc="Invalidate request from L2 bank";
+
+ // internal generated request
+ L1_Replacement, desc="L1 Replacement", format="!r";
+ PF_L1_Replacement, desc="Prefetch L1 Replacement", format="!pr";
+
+ // other requests
+ Fwd_GETX, desc="GETX from other processor";
+ Fwd_GETS, desc="GETS from other processor";
+ Fwd_GET_INSTR, desc="GET_INSTR from other processor";
+
+ Data, desc="Data for processor";
+ Data_Exclusive, desc="Data for processor";
+ DataS_fromL1, desc="data for GETS request, need to unblock directory";
+ Data_all_Acks, desc="Data for processor, all acks";
+
+ Ack, desc="Ack for processor";
+ Ack_all, desc="Last ack for processor";
+
+ WB_Ack, desc="Ack for replacement";
+
+ PF_Load, desc="load request from prefetcher";
+ PF_Ifetch, desc="instruction fetch request from prefetcher";
+ PF_Store, desc="exclusive load request from prefetcher";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
+ State CacheState, desc="cache state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+ bool isPrefetch, desc="Set if this block was prefetched and not yet accessed";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="data is dirty";
+ bool isPrefetch, desc="Set if this was caused by a prefetch";
+ int pendingAcks, default="0", desc="number of pending acks";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+
+ int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ void set_cache_entry(AbstractCacheEntry a);
+ void unset_cache_entry();
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpBuffers(Addr a);
+ void profileMsgDelay(int virtualNetworkType, Cycles c);
+
+ // inclusive cache returns L1 entries only
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
+ if(is_valid(L1Dcache_entry)) {
+ return L1Dcache_entry;
+ }
+
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
+ return L1Icache_entry;
+ }
+
+ Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
+ return L1Dcache_entry;
+ }
+
+ Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
+ return L1Icache_entry;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
+
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:NP;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
+
+ // MUST CHANGE
+ if(is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
+ }
+ }
+
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ Event prefetch_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:PF_Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:PF_Ifetch;
+ } else if ((type == RubyRequestType:ST) ||
+ (type == RubyRequestType:ATOMIC)) {
+ return Event:PF_Store;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ int getPendingAcks(TBE tbe) {
+ return tbe.pendingAcks;
+ }
+
+ out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
+ out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
+ out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
+ out_port(optionalQueue_out, RubyRequest, optionalQueue);
+
+
+ // Prefetch queue between the controller and the prefetcher
+ // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
+ // implemented as a LIFO structure. The structure would allow for fast
+ // searches of all entries in the queue, not just the head msg. All
+ // msgs in the structure can be invalidated if a demand miss matches.
+ in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
+ if (optionalQueue_in.isReady(clockEdge())) {
+ peek(optionalQueue_in, RubyRequest) {
+ // Instruction Prefetch
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The block to be prefetched is already present in the
+ // cache. We should drop this request.
+ trigger(prefetch_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The block is in the wrong L1 cache. We should drop
+ // this request.
+ trigger(prefetch_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress,
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
+            // L1 doesn't have the line, but we have space for it
+            // in the L1, so let's see if the L2 has it
+ trigger(prefetch_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:PF_L1_Replacement,
+ L1Icache.cacheProbe(in_msg.LineAddress),
+ getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
+ }
+ } else {
+ // Data prefetch
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The block to be prefetched is already present in the
+ // cache. We should drop this request.
+ trigger(prefetch_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress,
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The block is in the wrong L1. Just drop the prefetch
+ // request.
+ trigger(prefetch_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
+            // L1 doesn't have the line, but we have space for it in
+            // the L1, so let's see if the L2 has it
+ trigger(prefetch_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress,
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+ // No room in the L1, so we need to make room in the L1
+ trigger(Event:PF_L1_Replacement,
+ L1Dcache.cacheProbe(in_msg.LineAddress),
+ getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
+ }
+ }
+ }
+ }
+ }
+
+ // Response L1 Network - response msg to this L1 cache
+ in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
+ if (responseL1Network_in.isReady(clockEdge())) {
+ peek(responseL1Network_in, ResponseMsg, block_on="addr") {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
+ } else if(in_msg.Type == CoherenceResponseType:DATA) {
+ if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
+ getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
+ getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
+ getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
+ machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+
+ trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);
+
+ } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
+ trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
+ trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+ trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Invalid L1 response type");
+ }
+ }
+ }
+ }
+
+  // Request network - requests forwarded from the shared L2 to this L1 cache
+ in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
+ if(requestL1Network_in.isReady(clockEdge())) {
+ peek(requestL1Network_in, RequestMsg, block_on="addr") {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:GETX ||
+ in_msg.Type == CoherenceRequestType:UPGRADE) {
+ // upgrade transforms to GETX due to race
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
+ trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Invalid forwarded request type");
+ }
+ }
+ }
+ }
+
+  // Mandatory Queue between the node's CPU and its L1 caches
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+      // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+          // The tag matches in the L1, so handle the request here.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+            // The block is in the wrong L1; evict it from that cache first.
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
+            // L1 doesn't have the line, but we have space for it
+            // in the L1, so let's see if the L2 has it.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+            // No room in the L1, so we need to evict a block to make room
+
+            // Make sure the line we want to evict is not locked
+ Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+
+ trigger(Event:L1_Replacement, addr,
+ getL1ICacheEntry(addr),
+ TBEs[addr]);
+ }
+ }
+ } else {
+
+ // *** DATA ACCESS ***
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+          // The tag matches in the L1, so handle the request here.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+            // The block is in the wrong L1; evict it from that cache first.
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ }
+
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
+            // L1 doesn't have the line, but we have space for it
+            // in the L1, so let's see if the L2 has it.
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ } else {
+            // No room in the L1, so we need to evict a block to make room
+
+            // Make sure the line we want to evict is not locked
+ Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+
+ trigger(Event:L1_Replacement, addr,
+ getL1DCacheEntry(addr),
+ TBEs[addr]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void enqueuePrefetch(Addr address, RubyRequestType type) {
+ enqueue(optionalQueue_out, RubyRequest, 1) {
+ out_msg.LineAddress := address;
+ out_msg.Type := type;
+ out_msg.AccessMode := RubyAccessMode:Supervisor;
+ }
+ }
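+  // enqueuePrefetch is the hook the hardware prefetcher uses to inject
+  // requests; they arrive on optionalQueue_in and are mapped to the
+  // PF_* events by prefetch_request_type_to_event.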
+
+ // ACTIONS
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
+ peek(optionalQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GET_INSTR;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(pai_issuePfGETINSTR, "pai",
+ desc="Issue GETINSTR for prefetch request") {
+ peek(optionalQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GET_INSTR;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(
+ mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ }
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ DPRINTF(RubySlicc, "%s\n", machineID);
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
+ peek(optionalQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ DPRINTF(RubySlicc, "%s\n", machineID);
+
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:UPGRADE;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
+ address, out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Prefetch := in_msg.Prefetch;
+ out_msg.AccessMode := in_msg.AccessMode;
+ }
+ }
+ }
+
+ action(d_sendDataToRequestor, "d", desc="send data to requestor") {
+ peek(requestL1Network_in, RequestMsg) {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
+ peek(requestL1Network_in, RequestMsg) {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
+ peek(requestL1Network_in, RequestMsg) {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ }
+
+ action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ }
+
+  action(fi_sendInvAck, "fi", desc="send invalidate ack to the requestor") {
+ peek(requestL1Network_in, RequestMsg) {
+ enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.AckCount := 1;
+ }
+ }
+ }
+
+ action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
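+  // Note: forwarding evictions keeps the core's memory-consistency
+  // machinery in sync (e.g. an out-of-order CPU can squash speculative
+  // loads when a line it read is invalidated); send_evictions is
+  // typically enabled for such CPU models.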
+
+  action(g_issuePUTX, "g", desc="issue PUTX with data to the L2 cache") {
+ enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Requestor:= machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ if (cache_entry.Dirty) {
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
+ enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%#x\n", address);
+ }
+ }
+
+ action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
+ enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%#x\n", address);
+
+ }
+ }
+
+ action(dg_invalidate_sc, "dg",
+ desc="Invalidate store conditional as the cache lost permissions") {
+ sequencer.invalidateSC(address);
+ }
+
+ action(h_load_hit, "hd",
+ desc="Notify sequencer the load completed.")
+ {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Dcache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk);
+ }
+
+ action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
+ {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk);
+ }
+
+ action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
+ {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
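+    // The trailing 'true' marks this as an external hit (the miss was
+    // serviced outside this L1); the sequencer tracks these separately.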
+ sequencer.readCallback(address, cache_entry.DataBlk, true);
+ }
+
+ action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
+ {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Dcache.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk);
+ cache_entry.Dirty := true;
+ }
+
+ action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
+ {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.writeCallback(address, cache_entry.DataBlk, true);
+ cache_entry.Dirty := true;
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.isPrefetch := false;
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.DataBlk := cache_entry.DataBlk;
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(l_popRequestQueue, "l",
+ desc="Pop incoming request queue and profile the delay within this virtual network") {
+ Tick delay := requestL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
+ }
+
+ action(o_popIncomingResponseQueue, "o",
+ desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ Tick delay := responseL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
+ peek(responseL1Network_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(q_updateAckCount, "q", desc="Update ack count") {
+ peek(responseL1Network_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
+ }
+ }
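+
+  // Ack accounting: the L2 sends DATA with a negative AckCount equal to
+  // -(number of sharers being invalidated), while each sharer's inv ack
+  // carries AckCount := 1 (see fi_sendInvAck). tbe.pendingAcks therefore
+  // reaches zero exactly when the data and all acks have arrived, which
+  // is what the Data_all_Acks / Ack_all triggers test for.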
+
+ action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ if (L1Dcache.isTagPresent(address)) {
+ L1Dcache.deallocate(address);
+ } else {
+ L1Icache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1Dcache.allocate(address, new Entry));
+ }
+ }
+
+ action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1Icache.allocate(address, new Entry));
+ }
+ }
+
+ action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
+ stall_and_wait(mandatoryQueue_in, address);
+ }
+
+ action(z_stallAndWaitOptionalQueue, "\pz", desc="Stall and wait the L1 prefetch request queue") {
+ stall_and_wait(optionalQueue_in, address);
+ }
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1Icache.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1Dcache.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1Dcache.demand_hits;
+ }
+
+ action(po_observeHit, "\ph", desc="Inform the prefetcher about the hit") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ if (cache_entry.isPrefetch) {
+ prefetcher.observePfHit(in_msg.LineAddress);
+ cache_entry.isPrefetch := false;
+ }
+ }
+ }
+
+ action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ if (enable_prefetch) {
+ prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
+ }
+ }
+ }
+
+ action(ppm_observePfMiss, "\ppm",
+ desc="Inform the prefetcher about the partial miss") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ prefetcher.observePfMiss(in_msg.LineAddress);
+ }
+ }
+
+ action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
+ optionalQueue_in.dequeue(clockEdge());
+ }
+
+ action(mp_markPrefetched, "mp", desc="Set the isPrefetch flag") {
+ assert(is_valid(cache_entry));
+ cache_entry.isPrefetch := true;
+ }
+
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/Replacement/WriteBack from transient states
+ transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
+ z_stallAndWaitMandatoryQueue;
+ }
+
+ transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
+ z_stallAndWaitMandatoryQueue;
+ }
+
+ transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
+ z_stallAndWaitMandatoryQueue;
+ }
+
+ transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, PF_IS, PF_IS_I, PF_IM, PF_SM}, PF_L1_Replacement) {
+ z_stallAndWaitOptionalQueue;
+ }
+
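+  // z_stallAndWait* parks the blocked message on a per-address wait
+  // buffer; the matching kd_wakeUpDependents (wakeUpBuffers) on a
+  // completing transition re-queues it, so stalled requests do not poll.
+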
+ // Transitions from Idle
+ transition({NP,I}, {L1_Replacement, PF_L1_Replacement}) {
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition({S,E,M,IS,IM,SM,IS_I,PF_IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
+ {PF_Load, PF_Store, PF_Ifetch}) {
+ pq_popPrefetchQueue;
+ }
+
+ transition({NP,I}, Load, IS) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileDataMiss;
+ po_observeMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, PF_Load, PF_IS) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ pa_issuePfGETS;
+ pq_popPrefetchQueue;
+ }
+
+ transition(PF_IS, Load, IS) {
+ uu_profileDataMiss;
+ ppm_observePfMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(PF_IS_I, Load, IS_I) {
+ uu_profileDataMiss;
+ ppm_observePfMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(PF_IS_I, Ifetch, IS_I) {
+ uu_profileInstMiss;
+ ppm_observePfMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, Ifetch, IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ ai_issueGETINSTR;
+ uu_profileInstMiss;
+ po_observeMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, PF_Ifetch, PF_IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ pai_issuePfGETINSTR;
+ pq_popPrefetchQueue;
+ }
+
+  // We proactively assume that the prefetch went into the
+  // instruction cache
+ transition(PF_IS, Ifetch, IS) {
+    uu_profileInstMiss;
+ ppm_observePfMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, Store, IM) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileDataMiss;
+ po_observeMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP,I}, PF_Store, PF_IM) {
+ oo_allocateL1DCacheBlock;
+ i_allocateTBE;
+ pb_issuePfGETX;
+ pq_popPrefetchQueue;
+ }
+
+ transition(PF_IM, Store, IM) {
+ uu_profileDataMiss;
+ ppm_observePfMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(PF_SM, Store, SM) {
+ uu_profileDataMiss;
+ ppm_observePfMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition({NP, I}, Inv) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Shared
+ transition({S,E,M}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ po_observeHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S,E,M}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ po_observeHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ c_issueUPGRADE;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, {L1_Replacement, PF_L1_Replacement}, I) {
+ forward_eviction_to_cpu;
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(S, Inv, I) {
+ forward_eviction_to_cpu;
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Exclusive
+
+ transition({E,M}, Store, M) {
+ hh_store_hit;
+ uu_profileDataHit;
+ po_observeHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(E, {L1_Replacement, PF_L1_Replacement}, M_I) {
+    // no silent E replacement: E is written back like M
+ forward_eviction_to_cpu;
+ i_allocateTBE;
+ g_issuePUTX; // send data, but hold in case forwarded request
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(E, Inv, I) {
+ // don't send data
+ forward_eviction_to_cpu;
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition(E, Fwd_GETX, I) {
+ forward_eviction_to_cpu;
+ d_sendDataToRequestor;
+ l_popRequestQueue;
+ }
+
+ transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ // Transitions from Modified
+
+ transition(M, {L1_Replacement, PF_L1_Replacement}, M_I) {
+ forward_eviction_to_cpu;
+ i_allocateTBE;
+ g_issuePUTX; // send data, but hold in case forwarded request
+ ff_deallocateL1CacheBlock;
+ }
+
+ transition(M_I, WB_Ack, I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(M, Inv, I) {
+ forward_eviction_to_cpu;
+ f_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ transition(M_I, Inv, SINK_WB_ACK) {
+ ft_sendDataToL2_fromTBE;
+ l_popRequestQueue;
+ }
+
+ transition(M, Fwd_GETX, I) {
+ forward_eviction_to_cpu;
+ d_sendDataToRequestor;
+ l_popRequestQueue;
+ }
+
+ transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
+ d_sendDataToRequestor;
+ d2_sendDataToL2;
+ l_popRequestQueue;
+ }
+
+ transition(M_I, Fwd_GETX, SINK_WB_ACK) {
+ dt_sendDataToRequestor_fromTBE;
+ l_popRequestQueue;
+ }
+
+ transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
+ dt_sendDataToRequestor_fromTBE;
+ d2t_sendDataToL2_fromTBE;
+ l_popRequestQueue;
+ }
+
+ // Transitions from IS
+ transition({IS, IS_I}, Inv, IS_I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition(IS, Data_all_Acks, S) {
+ u_writeDataToL1Cache;
+ hx_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_IS, Data_all_Acks, S) {
+ u_writeDataToL1Cache;
+ s_deallocateTBE;
+ mp_markPrefetched;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS_I, Data_all_Acks, I) {
+ u_writeDataToL1Cache;
+ hx_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_IS_I, Data_all_Acks, I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, DataS_fromL1, S) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ hx_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_IS, DataS_fromL1, S) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS_I, DataS_fromL1, I) {
+ u_writeDataToL1Cache;
+ j_sendUnblock;
+ hx_load_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_IS_I, DataS_fromL1, I) {
+ j_sendUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // directory is blocked when sending exclusive data
+ transition(IS_I, Data_Exclusive, E) {
+ u_writeDataToL1Cache;
+ hx_load_hit;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // directory is blocked when sending exclusive data
+ transition(PF_IS_I, Data_Exclusive, E) {
+ u_writeDataToL1Cache;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Data_Exclusive, E) {
+ u_writeDataToL1Cache;
+ hx_load_hit;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_IS, Data_Exclusive, E) {
+ u_writeDataToL1Cache;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ mp_markPrefetched;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from IM
+ transition(IM, Inv, IM) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition({PF_IM, PF_SM}, Inv, PF_IM) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+ transition(IM, Data, SM) {
+ u_writeDataToL1Cache;
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(PF_IM, Data, PF_SM) {
+ u_writeDataToL1Cache;
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IM, Data_all_Acks, M) {
+ u_writeDataToL1Cache;
+ hhx_store_hit;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_IM, Data_all_Acks, M) {
+ u_writeDataToL1Cache;
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ mp_markPrefetched;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // transitions from SM
+ transition(SM, Inv, IM) {
+ forward_eviction_to_cpu;
+ fi_sendInvAck;
+ dg_invalidate_sc;
+ l_popRequestQueue;
+ }
+
+ transition({SM, IM, PF_SM, PF_IM}, Ack) {
+ q_updateAckCount;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(SM, Ack_all, M) {
+ jj_sendExclusiveUnblock;
+ hhx_store_hit;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(PF_SM, Ack_all, M) {
+ jj_sendExclusiveUnblock;
+ s_deallocateTBE;
+ mp_markPrefetched;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+  transition(SINK_WB_ACK, Inv) {
+ fi_sendInvAck;
+ l_popRequestQueue;
+ }
+
+  transition(SINK_WB_ACK, WB_Ack, I) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L2Cache, "MESI Directory L2 Cache CMP")
+ : CacheMemory * L2cache;
+ Cycles l2_request_latency := 2;
+ Cycles l2_response_latency := 2;
+ Cycles to_l1_latency := 1;
+
+ // Message Queues
+ // From local bank of L2 cache TO the network
+ MessageBuffer * DirRequestFromL2Cache, network="To", virtual_network="0",
+ vnet_type="request"; // this L2 bank -> Memory
+
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="2",
+ vnet_type="request"; // this L2 bank -> a local L1
+
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="1",
+ vnet_type="response"; // this L2 bank -> a local L1 || Memory
+
+ // FROM the network to this local bank of L2 cache
+ MessageBuffer * unblockToL2Cache, network="From", virtual_network="2",
+ vnet_type="unblock"; // a local L1 || Memory -> this L2 bank
+
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
+ vnet_type="request"; // a local L1 -> this L2 bank
+
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="1",
+ vnet_type="response"; // a local L1 || Memory -> this L2 bank
+{
+ // STATES
+ state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
+ // Base states
+ NP, AccessPermission:Invalid, desc="Not present in either cache";
+ SS, AccessPermission:Read_Only, desc="L2 cache entry Shared, also present in one or more L1s";
+ M, AccessPermission:Read_Write, desc="L2 cache entry Modified, not present in any L1s", format="!b";
+ MT, AccessPermission:Maybe_Stale, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
+
+ // L2 replacement
+ M_I, AccessPermission:Busy, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
+ MT_I, AccessPermission:Busy, desc="L2 cache replacing, getting data from exclusive";
+ MCT_I, AccessPermission:Busy, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
+ I_I, AccessPermission:Busy, desc="L2 replacing clean data, need to inv sharers and then drop data";
+ S_I, AccessPermission:Busy, desc="L2 replacing dirty data, collecting acks from L1s";
+
+ // Transient States for fetching data from memory
+ ISS, AccessPermission:Busy, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
+ IS, AccessPermission:Busy, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
+ IM, AccessPermission:Busy, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";
+
+ // Blocking states
+ SS_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from SS";
+ MT_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from MT";
+
+ MT_IIB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
+ MT_IB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
+ MT_SB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
+
+ }
+
+ // EVENTS
+ enumeration(Event, desc="L2 Cache events") {
+ // L2 events
+
+ // events initiated by the local L1s
+    L1_GET_INSTR, desc="an L1I GET_INSTR request for a block mapped to us";
+    L1_GETS, desc="an L1D GETS request for a block mapped to us";
+    L1_GETX, desc="an L1D GETX request for a block mapped to us";
+    L1_UPGRADE, desc="an L1D UPGRADE request for a block mapped to us";
+
+ L1_PUTX, desc="L1 replacing data";
+ L1_PUTX_old, desc="L1 replacing data, but no longer sharer";
+
+ // events initiated by this L2
+ L2_Replacement, desc="L2 Replacement", format="!r";
+ L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";
+
+ // events from memory controller
+ Mem_Data, desc="data from memory", format="!r";
+ Mem_Ack, desc="ack from memory", format="!r";
+
+ // M->S data writeback
+ WB_Data, desc="data from L1";
+ WB_Data_clean, desc="clean data from L1";
+ Ack, desc="writeback ack";
+    Ack_all, desc="last writeback ack";
+
+ Unblock, desc="Unblock from L1 requestor";
+    Exclusive_Unblock, desc="Exclusive unblock from L1 requestor";
+
+ MEM_Inv, desc="Invalidation from directory";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+    NetDest Sharers, desc="tracks the L1 sharers on-chip";
+ MachineID Exclusive, desc="Exclusive holder of block";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, default="false", desc="data is dirty";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, default="false", desc="Data is Dirty";
+
+ NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ int pendingAcks, desc="number of pending acks for invalidates during writeback";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ Cycles ticksToCycles(Tick t);
+
+ void set_cache_entry(AbstractCacheEntry a);
+ void unset_cache_entry();
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpBuffers(Addr a);
+ void profileMsgDelay(int virtualNetworkType, Cycles c);
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // inclusive cache, returns L2 entries only
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L2cache[addr]);
+ }
+
+ bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Sharers.isElement(requestor);
+ } else {
+ return false;
+ }
+ }
+
+ void addSharer(Addr addr, MachineID requestor, Entry cache_entry) {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %#x\n",
+ machineID, requestor, addr);
+ cache_entry.Sharers.add(requestor);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:NP;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ // MUST CHANGE
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
+ return L2Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(cache_entry.CacheState));
+ return L2Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L2Cache_State_to_permission(state));
+ }
+ }
+
+ Event L1Cache_request_type_to_event(CoherenceRequestType type, Addr addr,
+ MachineID requestor, Entry cache_entry) {
+ if(type == CoherenceRequestType:GETS) {
+ return Event:L1_GETS;
+ } else if(type == CoherenceRequestType:GET_INSTR) {
+ return Event:L1_GET_INSTR;
+ } else if (type == CoherenceRequestType:GETX) {
+ return Event:L1_GETX;
+ } else if (type == CoherenceRequestType:UPGRADE) {
+ if ( is_valid(cache_entry) && cache_entry.Sharers.isElement(requestor) ) {
+ return Event:L1_UPGRADE;
+ } else {
+ return Event:L1_GETX;
+ }
+ } else if (type == CoherenceRequestType:PUTX) {
+ if (isSharer(addr, requestor, cache_entry)) {
+ return Event:L1_PUTX;
+ } else {
+ return Event:L1_PUTX_old;
+ }
+ } else {
+ DPRINTF(RubySlicc, "address: %#x, Request Type: %s\n", addr, type);
+ error("Invalid L1 forwarded request type");
+ }
+ }
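+
+  // A PUTX from a machine that is no longer a sharer lost a race with an
+  // invalidation this L2 already sent; such requests are classified as
+  // L1_PUTX_old and answered with just a WB_ACK (no data is written).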
+
+ int getPendingAcks(TBE tbe) {
+ return tbe.pendingAcks;
+ }
+
+ bool isDirty(Entry cache_entry) {
+ assert(is_valid(cache_entry));
+ return cache_entry.Dirty;
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(L1RequestL2Network_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(DirRequestL2Network_out, RequestMsg, DirRequestFromL2Cache);
+ out_port(responseL2Network_out, ResponseMsg, responseFromL2Cache);
+
+
+ in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
+ if(L1unblockNetwork_in.isReady(clockEdge())) {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+ DPRINTF(RubySlicc, "Addr: %#x State: %s Sender: %s Type: %s Dest: %s\n",
+ in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
+ in_msg.Sender, in_msg.Type, in_msg.Destination);
+
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
+ trigger(Event:Exclusive_Unblock, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ trigger(Event:Unblock, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("unknown unblock message");
+ }
+ }
+ }
+ }
+
+ // Response L2 Network - response msg to this particular L2 bank
+ in_port(responseL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
+ if (responseL2Network_in.isReady(clockEdge())) {
+ peek(responseL2Network_in, ResponseMsg) {
+        // test whether it's from a local L1 or an off-chip source
+ assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+ if(in_msg.Type == CoherenceResponseType:DATA) {
+ if (in_msg.Dirty) {
+ trigger(Event:WB_Data, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:WB_Data_clean, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
+ trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ error("unknown message type");
+ }
+
+ } else { // external message
+ if(in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Mem_Data, in_msg.addr, cache_entry, tbe);
+ } else if(in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
+ trigger(Event:Mem_Ack, in_msg.addr, cache_entry, tbe);
+ } else if(in_msg.Type == CoherenceResponseType:INV) {
+ trigger(Event:MEM_Inv, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("unknown message type");
+ }
+ }
+ }
+ } // if not ready, do nothing
+ }
+
+ // L1 Request
+ in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
+ if(L1RequestL2Network_in.isReady(clockEdge())) {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ DPRINTF(RubySlicc, "Addr: %#x State: %s Req: %s Type: %s Dest: %s\n",
+ in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
+ in_msg.Requestor, in_msg.Type, in_msg.Destination);
+
+ assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
+ assert(in_msg.Destination.isElement(machineID));
+
+ if (is_valid(cache_entry)) {
+          // The L2 contains the block, so proceed with handling the request
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.addr,
+ in_msg.Requestor, cache_entry),
+ in_msg.addr, cache_entry, tbe);
+ } else {
+ if (L2cache.cacheAvail(in_msg.addr)) {
+            // L2 doesn't have the line, but we have space for it in the L2
+ trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.addr,
+ in_msg.Requestor, cache_entry),
+ in_msg.addr, cache_entry, tbe);
+ } else {
+ // No room in the L2, so we need to make room before handling the request
+ Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
+ if (isDirty(L2cache_entry)) {
+ trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ } else {
+ trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ // ACTIONS
+
+ action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(DirRequestL2Network_out, RequestMsg, l2_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+ }
+
+ action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(cache_entry.Exclusive);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ action(c_exclusiveReplacement, "c", desc="Send data to memory") {
+ enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(c_exclusiveCleanReplacement, "cc", desc="Send ack to memory for clean replacement") {
+ enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
+ enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+  action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+
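+        // Tell the requestor how many inv acks to expect: a negative
+        // count that the sharers' AckCount := 1 inv acks will cancel out.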
+ out_msg.AckCount := 0 - cache_entry.Sharers.count();
+ if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ out_msg.AckCount := out_msg.AckCount + 1;
+ }
+ }
+ }
+ }
+
+  action(dd_sendExclusiveDataToRequestor, "dd", desc="Send exclusive data from cache to requestor") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+
+ out_msg.AckCount := 0 - cache_entry.Sharers.count();
+ if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ out_msg.AckCount := out_msg.AckCount + 1;
+ }
+ }
+ }
+ }
+
+  action(ds_sendSharedDataToRequestor, "ds", desc="Send shared data from cache to requestor") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(responseL2Network_out, ResponseMsg, l2_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.AckCount := 0;
+ }
+ }
+ }
+
+ action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
+ assert(is_valid(tbe));
+ assert(tbe.L1_GetS_IDs.count() > 0);
+ enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
+ assert(is_valid(tbe));
+ assert(tbe.L1_GetS_IDs.count() == 1);
+ enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Destination := tbe.L1_GetS_IDs; // internal nodes
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
+ enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
+ assert(is_valid(tbe));
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(tbe.L1_GetX_ID);
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "Address: %#x, Destination: %s, DataBlock: %s\n",
+ out_msg.addr, out_msg.Destination, out_msg.DataBlk);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
+ enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination := cache_entry.Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination := cache_entry.Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(L1RequestL2Network_out, RequestMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination := cache_entry.Sharers;
+ out_msg.Destination.remove(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ // OTHER ACTIONS
+ action(i_allocateTBE, "i", desc="Allocate TBE for request") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.L1_GetS_IDs.clear();
+ tbe.DataBlk := cache_entry.DataBlk;
+ tbe.Dirty := cache_entry.Dirty;
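+    // Expect one inv ack from every current sharer during the writeback.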
+ tbe.pendingAcks := cache_entry.Sharers.count();
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
+ Tick delay := L1RequestL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
+ }
+
+ action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
+ Tick delay := L1unblockNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
+ }
+
+ action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
+ Tick delay := responseL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
+ }
+
+ action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
+ peek(responseL2Network_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if (in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+  action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from request queue to cache") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ if (in_msg.Dirty) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(q_updateAck, "q", desc="update pending ack count") {
+ peek(responseL2Network_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
+ APPEND_TRANSITION_COMMENT(in_msg.AckCount);
+ APPEND_TRANSITION_COMMENT(" p: ");
+ APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
+ }
+ }
+
+ action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
+ peek(responseL2Network_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.L1_GetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.L1_GetX_ID := in_msg.Requestor;
+ }
+ }
+
+ action(set_setMRU, "\set", desc="set the MRU entry") {
+ L2cache.setMRU(address);
+ }
+
+ action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cache.deallocate(address);
+ unset_cache_entry();
+ }
+
+ action(t_sendWBAck, "t", desc="Send writeback ACK") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:WB_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ enqueue(responseL2Network_out, ResponseMsg, to_l1_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ // upgrader doesn't get ack from itself, hence the + 1
+ out_msg.AckCount := 0 - cache_entry.Sharers.count() + 1;
+ }
+ }
+ }
+
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cache.demand_hits;
+ }
+
+ action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ addSharer(address, in_msg.Requestor, cache_entry);
+ APPEND_TRANSITION_COMMENT( cache_entry.Sharers );
+ }
+ }
+
+ action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ addSharer(address, in_msg.Sender, cache_entry);
+ }
+ }
+
+ action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.Sharers.remove(in_msg.Requestor);
+ }
+ }
+
+ action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.Sharers.clear();
+ }
+ }
+
+ action(mm_markExclusive, "\m", desc="set the exclusive owner") {
+ peek(L1RequestL2Network_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.Sharers.clear();
+ cache_entry.Exclusive := in_msg.Requestor;
+ addSharer(address, in_msg.Requestor, cache_entry);
+ }
+ }
+
+ action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
+ peek(L1unblockNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.Sharers.clear();
+ cache_entry.Exclusive := in_msg.Sender;
+ addSharer(address, in_msg.Sender, cache_entry);
+ }
+ }
+
+ action(zz_stallAndWaitL1RequestQueue, "zz", desc="recycle L1 request queue") {
+ stall_and_wait(L1RequestL2Network_in, address);
+ }
+
+  action(zn_recycleResponseNetwork, "zn", desc="recycle response queue") {
+ responseL2Network_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
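+  // Unlike stall_and_wait, recycle() re-inserts the message at the tail
+  // of its own queue after recycle_latency, so this port simply repolls
+  // until the blocking state is exited.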
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+
+ //===============================================
+ // BASE STATE - I
+
+ // Transitions from I (Idle)
+ transition({NP, IS, ISS, IM, SS, M, M_I, I_I, S_I, MT_IB, MT_SB}, L1_PUTX) {
+ t_sendWBAck;
+ jj_popL1RequestQueue;
+ }
+
+ transition({NP, SS, M, MT, M_I, I_I, S_I, IS, ISS, IM, MT_IB, MT_SB}, L1_PUTX_old) {
+ t_sendWBAck;
+ jj_popL1RequestQueue;
+ }
+
+ transition({IM, IS, ISS, SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ transition({IM, IS, ISS, SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
+ zn_recycleResponseNetwork;
+ }
+
+ transition({I_I, S_I, M_I, MT_I, MCT_I, NP}, MEM_Inv) {
+ o_popIncomingResponseQueue;
+ }
+
+
+ transition({SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+
+ transition(NP, L1_GETS, ISS) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(NP, L1_GET_INSTR, IS) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ nn_addSharer;
+ i_allocateTBE;
+ ss_recordGetSL1ID;
+ a_issueFetchToMemory;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition(NP, L1_GETX, IM) {
+ qq_allocateL2CacheBlock;
+ ll_clearSharers;
+ // nn_addSharer;
+ i_allocateTBE;
+ xx_recordGetXL1ID;
+ a_issueFetchToMemory;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+
+ // transitions from IS/IM
+
+ transition(ISS, Mem_Data, MT_MB) {
+ m_writeDataToCache;
+ ex_sendExclusiveDataToGetSRequestors;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(IS, Mem_Data, SS) {
+ m_writeDataToCache;
+ e_sendDataToGetSRequestors;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IM, Mem_Data, MT_MB) {
+ m_writeDataToCache;
+ ee_sendDataToGetXRequestor;
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}, IS) {
+ nn_addSharer;
+ ss_recordGetSL1ID;
+ uu_profileMiss;
+ jj_popL1RequestQueue;
+ }
+
+ transition({IS, ISS}, L1_GETX) {
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ // transitions from SS
+ transition(SS, {L1_GETS, L1_GET_INSTR}) {
+ ds_sendSharedDataToRequestor;
+ nn_addSharer;
+ set_setMRU;
+ uu_profileHit;
+ jj_popL1RequestQueue;
+ }
+
+
+ transition(SS, L1_GETX, SS_MB) {
+ d_sendDataToRequestor;
+ // fw_sendFwdInvToSharers;
+ fwm_sendFwdInvToSharersMinusRequestor;
+ set_setMRU;
+ uu_profileHit;
+ jj_popL1RequestQueue;
+ }
+
+ transition(SS, L1_UPGRADE, SS_MB) {
+ fwm_sendFwdInvToSharersMinusRequestor;
+ ts_sendInvAckToUpgrader;
+ set_setMRU;
+ uu_profileHit;
+ jj_popL1RequestQueue;
+ }
+
+ transition(SS, L2_Replacement_clean, I_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(SS, {L2_Replacement, MEM_Inv}, S_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+
+ transition(M, L1_GETX, MT_MB) {
+ d_sendDataToRequestor;
+ set_setMRU;
+ uu_profileHit;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L1_GET_INSTR, SS) {
+ d_sendDataToRequestor;
+ nn_addSharer;
+ set_setMRU;
+ uu_profileHit;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, L1_GETS, MT_MB) {
+ dd_sendExclusiveDataToRequestor;
+ set_setMRU;
+ uu_profileHit;
+ jj_popL1RequestQueue;
+ }
+
+ transition(M, {L2_Replacement, MEM_Inv}, M_I) {
+ i_allocateTBE;
+ c_exclusiveReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L2_Replacement_clean, M_I) {
+ i_allocateTBE;
+ c_exclusiveCleanReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+
+ // transitions from MT
+
+ transition(MT, L1_GETX, MT_MB) {
+ b_forwardRequestToExclusive;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+
+ transition(MT, {L1_GETS, L1_GET_INSTR}, MT_IIB) {
+ b_forwardRequestToExclusive;
+ uu_profileMiss;
+ set_setMRU;
+ jj_popL1RequestQueue;
+ }
+
+ transition(MT, {L2_Replacement, MEM_Inv}, MT_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L2_Replacement_clean, MCT_I) {
+ i_allocateTBE;
+ f_sendInvToSharers;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(MT, L1_PUTX, M) {
+ ll_clearSharers;
+ mr_writeDataToCacheFromRequest;
+ t_sendWBAck;
+ jj_popL1RequestQueue;
+ }
+
+ transition({SS_MB,MT_MB}, Exclusive_Unblock, MT) {
+ // update actual directory
+ mmu_markExclusiveFromUnblock;
+ k_popUnblockQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MT_IIB, {L1_PUTX, L1_PUTX_old}){
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ transition(MT_IIB, Unblock, MT_IB) {
+ nnu_addSharerFromUnblock;
+ k_popUnblockQueue;
+ }
+
+ transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
+ m_writeDataToCache;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MT_SB, Unblock, SS) {
+ nnu_addSharerFromUnblock;
+ k_popUnblockQueue;
+ kd_wakeUpDependents;
+ }
+
+ // writeback states
+ transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ transition(I_I, Ack) {
+ q_updateAck;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(I_I, Ack_all, M_I) {
+ c_exclusiveCleanReplacement;
+ o_popIncomingResponseQueue;
+ }
+
+ transition({MT_I, MCT_I}, WB_Data, M_I) {
+ qq_writeDataToTBE;
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MCT_I, {WB_Data_clean, Ack_all}, M_I) {
+ c_exclusiveCleanReplacement;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MCT_I, {L1_PUTX, L1_PUTX_old}){
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ // The L1 never modified the block (the writeback is clean), so replace
+ // from the TBE copy
+ transition(MT_I, {WB_Data_clean, Ack_all}, M_I) {
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(MT_I, {L1_PUTX, L1_PUTX_old}){
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ // possible race between unblock and immediate replacement
+ transition({MT_MB,SS_MB}, {L1_PUTX, L1_PUTX_old}) {
+ zz_stallAndWaitL1RequestQueue;
+ }
+
+ transition(S_I, Ack) {
+ q_updateAck;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(S_I, Ack_all, M_I) {
+ ct_exclusiveReplacementFromTBE;
+ o_popIncomingResponseQueue;
+ }
+
+ transition(M_I, Mem_Ack, NP) {
+ s_deallocateTBE;
+ o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:Directory, "MESI Two Level directory protocol")
+ : DirectoryMemory * directory;
+ Cycles to_mem_ctrl_latency := 1;
+ Cycles directory_latency := 6;
+
+ MessageBuffer * requestToDir, network="From", virtual_network="0",
+ vnet_type="request";
+ MessageBuffer * responseToDir, network="From", virtual_network="1",
+ vnet_type="response";
+ MessageBuffer * responseFromDir, network="To", virtual_network="1",
+ vnet_type="response";
+
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
+ ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
+ ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
+
+ M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
+ IM, AccessPermission:Busy, desc="Intermediate State I>M";
+ MI, AccessPermission:Busy, desc="Intermediate State M>I";
+ M_DRD, AccessPermission:Busy, desc="Intermediate state when there is a DMA read";
+ M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";
+ M_DWR, AccessPermission:Busy, desc="Intermediate state when there is a DMA write";
+ M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ Fetch, desc="A memory fetch arrives";
+ Data, desc="writeback data arrives";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+//added by SS for dma
+ DMA_READ, desc="A DMA Read memory request";
+ DMA_WRITE, desc="A DMA Write memory request";
+ CleanReplacement, desc="Clean Replacement in L2 cache";
+
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ MachineID Owner;
+ }
+
+ // TBE entries for DMA requests
+ structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ Addr PhysicalAddress, desc="physical address";
+ State TBEState, desc="Transient State";
+ DataBlock DataBlk, desc="Data to be written (DMA write only)";
+ int Len, desc="...";
+ MachineID Requestor, desc="The DMA engine that sent the request";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ bool functionalRead(Packet *pkt);
+ int functionalWrite(Packet *pkt);
+ }
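+
+ // TBETable is only declared here (external="yes"); its implementation
+ // lives in C++ on the gem5 side, so SLICC sees just this interface.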
+
+
+ // ** OBJECTS **
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ void set_tbe(TBE tbe);
+ void unset_tbe();
+ void wakeUpBuffers(Addr a);
+
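+ // Directory entries are allocated lazily: a lookup that misses creates a
+ // fresh Entry (in the default state, I) rather than failing, so every line
+ // effectively always has a directory entry.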
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+
+ if (is_valid(dir_entry)) {
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (directory.isPresent(addr)) {
+ return getDirectoryEntry(addr).DirectoryState;
+ } else {
+ return State:I;
+ }
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (directory.isPresent(addr)) {
+ getDirectoryEntry(addr).DirectoryState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
+ return Directory_State_to_permission(tbe.TBEState);
+ }
+
+ if(directory.isPresent(addr)) {
+ DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ if (directory.isPresent(addr)) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+ }
+
+ bool isGETRequest(CoherenceRequestType type) {
+ return (type == CoherenceRequestType:GETS) ||
+ (type == CoherenceRequestType:GET_INSTR) ||
+ (type == CoherenceRequestType:GETX);
+ }
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+ // ** IN_PORTS **
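+ //
+ // The rank arguments set the polling priority of these in_ports: memory
+ // responses (rank 2) are serviced ahead of coherence responses (rank 1),
+ // which are serviced ahead of new requests (rank 0), so in-flight
+ // transactions can always drain before fresh work is admitted.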
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (isGETRequest(in_msg.Type)) {
+ trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
+ trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+
+ // Actions
+ action(a_sendAck, "a", desc="Send ack to L2") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Sender);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+
+ Entry e := getDirectoryEntry(in_msg.addr);
+ e.Owner := in_msg.OriginalRequestorMachId;
+ }
+ }
+ }
+
+ action(aa_sendAck, "aa", desc="Send ack to L2") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:MEMORY_ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestNetwork_in, RequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(in_msg.Sender, address, to_mem_ctrl_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+//added by SS for dma
+ action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
+ peek(requestNetwork_in, RequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
+ }
+ }
+
+ action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
+ out_msg.Destination.add(tbe.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest_partial, "qwp",
+ desc="Queue off-chip writeback request") {
+ peek(requestNetwork_in, RequestMsg) {
+ queueMemoryWritePartial(machineID, address, to_mem_ctrl_latency,
+ in_msg.DataBlk, in_msg.Len);
+ }
+ }
+
+ action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
+ enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Destination.add(tbe.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(z_stallAndWaitRequest, "z", desc="recycle request queue") {
+ stall_and_wait(requestNetwork_in, address);
+ }
+
+ action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:INV;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(getDirectoryEntry(address).Owner);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+
+ action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
+ out_msg.Destination.add(tbe.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE") {
+ peek(requestNetwork_in, RequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.PhysicalAddress := in_msg.addr;
+ tbe.Len := in_msg.Len;
+ tbe.Requestor := in_msg.Requestor;
+ }
+ }
+
+ action(qw_queueMemoryWBRequest_partialTBE, "qwt",
+ desc="Queue off-chip writeback request") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWritePartial(in_msg.Sender, tbe.PhysicalAddress,
+ to_mem_ctrl_latency, tbe.DataBlk, tbe.Len);
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+
+ // TRANSITIONS
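+ //
+ // Overall flow: a fetch in I queues a memory read (I->IM), and the
+ // returning Memory_Data is forwarded to the requestor, which is recorded
+ // as Owner (IM->M); a dirty writeback in M queues a memory write (M->MI)
+ // and the final Memory_Ack returns the line to I. DMA requests take the
+ // ID/ID_W detours from I and the M_DRD*/M_DWR* detours from M.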
+
+ transition(I, Fetch, IM) {
+ qf_queueMemoryFetchRequest;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M, Fetch) {
+ inv_sendCacheInvalidate;
+ z_stallAndWaitRequest;
+ }
+
+ transition(IM, Memory_Data, M) {
+ d_sendData;
+ l_popMemQueue;
+ kd_wakeUpDependents;
+ }
+//added by SS
+ transition(M, CleanReplacement, I) {
+ a_sendAck;
+ k_popIncomingResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(M, Data, MI) {
+ qw_queueMemoryWBRequest;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(MI, Memory_Ack, I) {
+ aa_sendAck;
+ l_popMemQueue;
+ kd_wakeUpDependents;
+ }
+
+
+//added by SS for dma support
+ transition(I, DMA_READ, ID) {
+ v_allocateTBE;
+ qf_queueMemoryFetchRequestDMA;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(ID, Memory_Data, I) {
+ dr_sendDMAData;
+ w_deallocateTBE;
+ l_popMemQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(I, DMA_WRITE, ID_W) {
+ v_allocateTBE;
+ qw_queueMemoryWBRequest_partial;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(ID_W, Memory_Ack, I) {
+ da_sendDMAAck;
+ w_deallocateTBE;
+ l_popMemQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data} ) {
+ z_stallAndWaitRequest;
+ }
+
+ transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ} ) {
+ zz_recycleDMAQueue;
+ }
+
+
+ transition(M, DMA_READ, M_DRD) {
+ v_allocateTBE;
+ inv_sendCacheInvalidate;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M_DRD, Data, M_DRDI) {
+ drp_sendDMAData;
+ w_deallocateTBE;
+ qw_queueMemoryWBRequest;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(M_DRDI, Memory_Ack, I) {
+ aa_sendAck;
+ l_popMemQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(M, DMA_WRITE, M_DWR) {
+ v_allocateTBE;
+ inv_sendCacheInvalidate;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(M_DWR, Data, M_DWRI) {
+ qw_queueMemoryWBRequest_partialTBE;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(M_DWRI, Memory_Ack, I) {
+ aa_sendAck;
+ da_sendDMAAck;
+ w_deallocateTBE;
+ l_popMemQueue;
+ kd_wakeUpDependents;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
+ * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:DMA, "DMA Controller")
+: DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
+
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ vnet_type="request";
+ MessageBuffer * mandatoryQueue;
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, AccessPermission:Invalid, desc="Ready to accept a new request";
+ BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
+ BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ Ack, desc="DMA write to memory completed";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+
+ TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else {
+ return State:READY;
+ }
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("DMA does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("DMA does not support functional write.");
+ }
+
+ out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
+ trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(requestToDir_out, RequestMsg, request_latency) {
+ out_msg.addr := in_msg.PhysicalAddress;
+ out_msg.Type := CoherenceRequestType:DMA_READ;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(requestToDir_out, RequestMsg, request_latency) {
+ out_msg.addr := in_msg.PhysicalAddress;
+ out_msg.Type := CoherenceRequestType:DMA_WRITE;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
+ dma_sequencer.ackCallback(address);
+ }
+
+ action(d_dataCallback, "d", desc="Write data to dma sequencer") {
+ dma_sequencer.dataCallback(tbe.DataBlk, address);
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ assert(is_valid(tbe));
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+ action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue(clockEdge());
+ }
+
+ action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
+ stall_and_wait(dmaRequestQueue_in, address);
+ }
+
+ action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
+
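+ // The DMA engine is a simple two-phase machine: READY accepts one request
+ // at a time into a TBE (BUSY_RD / BUSY_WR); the matching Data or Ack from
+ // the directory completes the sequencer callback, frees the TBE, and wakes
+ // any requests stalled behind it.
+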
+ transition(READY, ReadRequest, BUSY_RD) {
+ v_allocateTBE;
+ s_sendReadRequest;
+ p_popRequestQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ v_allocateTBE;
+ s_sendWriteRequest;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Data, READY) {
+ t_updateTBEData;
+ d_dataCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition(BUSY_WR, Ack, READY) {
+ a_ackCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
+ zz_stallAndWaitRequestQueue;
+ }
+
+}
--- /dev/null
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ UPGRADE, desc="UPGRADE to exclusive";
+ GETS, desc="Get Shared";
+ GET_INSTR, desc="Get Instruction";
+ INV, desc="INValidate";
+ PUTX, desc="Replacement message";
+
+ WB_ACK, desc="Writeback ack";
+
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+ MEMORY_ACK, desc="Ack from memory controller";
+ DATA, desc="Data block for L1 cache in S state";
+ DATA_EXCLUSIVE, desc="Data block for L1 cache in M/E state";
+ MEMORY_DATA, desc="Data block from / to main memory";
+ ACK, desc="Generic invalidate ack";
+ WB_ACK, desc="writeback ack";
+ UNBLOCK, desc="unblock";
+ EXCLUSIVE_UNBLOCK, desc="exclusive unblock";
+ INV, desc="Invalidate from directory";
+}
+
+// RequestMsg
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ MachineID Requestor, desc="What component made the request";
+ NetDest Destination, desc="What components receive the request, includes MachineType and num";
+ MessageSizeType MessageSize, desc="size category of the message";
+ DataBlock DataBlk, desc="Data for the cache line (if PUTX)";
+ int Len;
+ bool Dirty, default="false", desc="Dirty bit";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+
+ bool functionalRead(Packet *pkt) {
+ // Only PUTX messages contain the data block
+ if (Type == CoherenceRequestType:PUTX) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // write data to any message that carries the block
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
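+
+// functionalRead is deliberately conservative: only message types known to
+// carry a valid DataBlk may satisfy a read, while functionalWrite updates
+// every message holding the block so no stale copy survives in flight.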
+
+// ResponseMsg
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="What component sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="Data for the cache line";
+ bool Dirty, default="false", desc="Dirty bit";
+ int AckCount, default="0", desc="number of acks in this message";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ // Valid data block is only present in message with following types
+ if (Type == CoherenceResponseType:DATA ||
+ Type == CoherenceResponseType:DATA_EXCLUSIVE ||
+ Type == CoherenceResponseType:MEMORY_DATA) {
+
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // write data to any message that carries the block
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
--- /dev/null
+protocol "MESI_Two_Level";
+include "RubySlicc_interfaces.slicc";
+include "MESI_Two_Level-msg.sm";
+include "MESI_Two_Level-L1cache.sm";
+include "MESI_Two_Level-L2cache.sm";
+include "MESI_Two_Level-dir.sm";
+include "MESI_Two_Level-dma.sm";
--- /dev/null
+/*
+ * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
+ * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L1Cache, "MI Example L1 Cache")
+ : Sequencer * sequencer;
+ CacheMemory * cacheMemory;
+ Cycles cache_response_latency := 12;
+ Cycles issue_latency := 2;
+ bool send_evictions;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="2",
+ vnet_type="request";
+ MessageBuffer * responseFromCache, network="To", virtual_network="4",
+ vnet_type="response";
+
+ MessageBuffer * forwardToCache, network="From", virtual_network="3",
+ vnet_type="forward";
+ MessageBuffer * responseToCache, network="From", virtual_network="4",
+ vnet_type="response";
+
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states") {
+ I, AccessPermission:Invalid, desc="Not Present/Invalid";
+ II, AccessPermission:Busy, desc="Not Present/Invalid, issued PUT";
+ M, AccessPermission:Read_Write, desc="Modified";
+ MI, AccessPermission:Busy, desc="Modified, issued PUT";
+ MII, AccessPermission:Busy, desc="Modified, issued PUTX, received nack";
+
+ IS, AccessPermission:Busy, desc="Issued request for LOAD/IFETCH";
+ IM, AccessPermission:Busy, desc="Issued request for STORE/ATOMIC";
+ }
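+
+ // M and I are the only stable states; the rest are transients covering an
+ // outstanding miss (IS, IM) or a writeback in flight (MI, II, MII). In a
+ // pure MI protocol every fill arrives writable, so loads also allocate
+ // the block in M.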
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ // From processor
+
+ Load, desc="Load request from processor";
+ Ifetch, desc="Ifetch request from processor";
+ Store, desc="Store request from processor";
+
+ Data, desc="Data from network";
+ Fwd_GETX, desc="Forward from network";
+
+ Inv, desc="Invalidate request from dir";
+
+ Replacement, desc="Replace a block";
+ Writeback_Ack, desc="Ack from the directory for a writeback";
+ Writeback_Nack, desc="Nack from the directory for a writeback";
+ }
+
+ // STRUCTURE DEFINITIONS
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ DataBlock DataBlk, desc="Data in the block";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+
+ // STRUCTURES
+ TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+
+ // PROTOTYPES
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ void set_cache_entry(AbstractCacheEntry a);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void profileMsgDelay(int virtualNetworkType, Cycles b);
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", cacheMemory.lookup(address));
+ }
+
+ // FUNCTIONS
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ }
+ else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ else {
+ return State:I;
+ }
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ // NETWORK PORTS
+
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+
+ in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
+ if (forwardRequestNetwork_in.isReady(clockEdge())) {
+ peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
+ }
+ else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+ trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
+ }
+ else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+ trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
+ }
+ else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
+ }
+ else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseToCache) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ }
+ else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ if (is_invalid(cache_entry) &&
+ cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
+ // make room for the block
+ // Check if the line we want to evict is not locked
+ Addr addr := cacheMemory.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+ trigger(Event:Replacement, addr,
+ getCacheEntry(addr),
+ TBEs[addr]);
+ }
+ else {
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
+ cache_entry, TBEs[in_msg.LineAddress]);
+ }
+ }
+ }
+ }
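+
+ // Replacement handling: when the target set is full, the probed victim is
+ // evicted first (Event:Replacement) without popping the mandatory queue,
+ // so the demand request retries once the eviction completes;
+ // check_on_cache_probe stalls the queue if the victim address is blocked.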
+
+ // ACTIONS
+
+ action(a_issueRequest, "a", desc="Issue a request") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Control;
+ }
+ }
+
+ action(b_issuePUT, "b", desc="Issue a PUT request") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Data;
+ }
+ }
+
+ action(e_sendData, "e", desc="Send data from cache to requestor") {
+ peek(forwardRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
+ peek(forwardRequestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(cacheMemory.allocate(address, new Entry));
+ }
+ }
+
+ action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
+ if (is_valid(cache_entry)) {
+ cacheMemory.deallocate(address);
+ unset_cache_entry();
+ }
+ }
+
+ action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop the response queue") {
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
+ }
+
+ action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
+ Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
+ }
+
+ action(p_profileMiss, "pi", desc="Profile cache miss") {
+ ++cacheMemory.demand_misses;
+ }
+
+ action(p_profileHit, "ph", desc="Profile cache hit") {
+ ++cacheMemory.demand_hits;
+ }
+
+ action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
+ cacheMemory.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, false);
+ }
+
+ action(rx_load_hit, "rx", desc="External load completed.") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
+ cacheMemory.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, true,
+ machineIDToMachineType(in_msg.Sender));
+ }
+ }
+
+ action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
+ cacheMemory.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk, false);
+ }
+
+ action(sx_store_hit, "sx", desc="External store completed.") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
+ cacheMemory.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk, true,
+ machineIDToMachineType(in_msg.Sender));
+ }
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to the cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE") {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
+ assert(is_valid(cache_entry));
+ assert(is_valid(tbe));
+ tbe.DataBlk := cache_entry.DataBlk;
+ }
+
+ action(z_stall, "z", desc="stall") {
+ // do nothing
+ }
+
+ // TRANSITIONS
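+ //
+ // Transient states stall anything they cannot handle via z_stall, which
+ // pops nothing: the head message stays queued and is retried on the next
+ // wakeup.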
+
+ transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
+ z_stall;
+ }
+
+ transition({IS, IM}, {Fwd_GETX, Inv}) {
+ z_stall;
+ }
+
+ transition(MI, Inv) {
+ o_popForwardedRequestQueue;
+ }
+
+ transition(M, Store) {
+ s_store_hit;
+ p_profileHit;
+ m_popMandatoryQueue;
+ }
+
+ transition(M, {Load, Ifetch}) {
+ r_load_hit;
+ p_profileHit;
+ m_popMandatoryQueue;
+ }
+
+ transition(I, Inv) {
+ o_popForwardedRequestQueue;
+ }
+
+ transition(I, Store, IM) {
+ v_allocateTBE;
+ i_allocateL1CacheBlock;
+ a_issueRequest;
+ p_profileMiss;
+ m_popMandatoryQueue;
+ }
+
+ transition(I, {Load, Ifetch}, IS) {
+ v_allocateTBE;
+ i_allocateL1CacheBlock;
+ a_issueRequest;
+ p_profileMiss;
+ m_popMandatoryQueue;
+ }
+
+ transition(IS, Data, M) {
+ u_writeDataToCache;
+ rx_load_hit;
+ w_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data, M) {
+ u_writeDataToCache;
+ sx_store_hit;
+ w_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition(M, Fwd_GETX, I) {
+ e_sendData;
+ forward_eviction_to_cpu;
+ o_popForwardedRequestQueue;
+ }
+
+ transition(I, Replacement) {
+ h_deallocateL1CacheBlock;
+ }
+
+ transition(M, {Replacement,Inv}, MI) {
+ v_allocateTBE;
+ b_issuePUT;
+ x_copyDataFromCacheToTBE;
+ forward_eviction_to_cpu;
+ h_deallocateL1CacheBlock;
+ }
+
+ transition(MI, Writeback_Ack, I) {
+ w_deallocateTBE;
+ o_popForwardedRequestQueue;
+ }
+
+ transition(MI, Fwd_GETX, II) {
+ ee_sendDataFromTBE;
+ o_popForwardedRequestQueue;
+ }
+
+ transition(MI, Writeback_Nack, MII) {
+ o_popForwardedRequestQueue;
+ }
+
+ transition(MII, Fwd_GETX, I) {
+ ee_sendDataFromTBE;
+ w_deallocateTBE;
+ o_popForwardedRequestQueue;
+ }
+
+ transition(II, Writeback_Nack, I) {
+ w_deallocateTBE;
+ o_popForwardedRequestQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
+ * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:Directory, "Directory protocol")
+ : DirectoryMemory * directory;
+ Cycles directory_latency := 12;
+ Cycles to_memory_controller_latency := 1;
+
+ MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+ vnet_type="forward";
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ vnet_type="response";
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+ vnet_type="response";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ vnet_type="request";
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ vnet_type="request";
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, AccessPermission:Read_Write, desc="Invalid";
+ M, AccessPermission:Invalid, desc="Modified";
+
+ M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
+ M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";
+
+ M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
+ M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";
+
+ IM, AccessPermission:Busy, desc="Intermediate state I-->M";
+ MI, AccessPermission:Busy, desc="Intermediate state M-->I";
+ ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
+ ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ // processor requests
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ PUTX, desc="A PUTX arrives";
+ PUTX_NotOwner, desc="A PUTX arrives from a non-owner";
+
+ // DMA requests
+ DMA_READ, desc="A DMA Read memory request";
+ DMA_WRITE, desc="A DMA Write memory request";
+
+ // Memory Controller
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ NetDest Sharers, desc="Sharers for this block";
+ NetDest Owner, desc="Owner of this block";
+ }
+
+ // TBE entries for DMA requests
+ structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ Addr PhysicalAddress, desc="physical address";
+ State TBEState, desc="Transient State";
+ DataBlock DataBlk, desc="Data to be written (DMA write only)";
+ int Len, desc="...";
+ MachineID DmaRequestor, desc="DMA requestor";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ // ** OBJECTS **
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ Tick cyclesToTicks(Cycles c);
+ void set_tbe(TBE b);
+ void unset_tbe();
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+
+ if (is_valid(dir_entry)) {
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (directory.isPresent(addr)) {
+ return getDirectoryEntry(addr).DirectoryState;
+ } else {
+ return State:I;
+ }
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (directory.isPresent(addr)) {
+
+ if (state == State:M) {
+ assert(getDirectoryEntry(addr).Owner.count() == 1);
+ assert(getDirectoryEntry(addr).Sharers.count() == 0);
+ }
+
+ getDirectoryEntry(addr).DirectoryState := state;
+
+ if (state == State:I) {
+ assert(getDirectoryEntry(addr).Owner.count() == 0);
+ assert(getDirectoryEntry(addr).Sharers.count() == 0);
+ }
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ return Directory_State_to_permission(tbe.TBEState);
+ }
+
+ if(directory.isPresent(addr)) {
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ if (directory.isPresent(addr)) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ // ** OUT_PORTS **
+ out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
+ out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
+
+ // ** IN_PORTS **
+ in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ TBE tbe := TBEs[in_msg.LineAddress];
+ if (in_msg.Type == DMARequestType:READ) {
+ trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
+ } else if (in_msg.Type == DMARequestType:WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestQueue_in, RequestMsg, requestToDir) {
+ if (requestQueue_in.isReady(clockEdge())) {
+ peek(requestQueue_in, RequestMsg) {
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.addr, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.addr, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ if (getDirectoryEntry(in_msg.addr).Owner.isElement(in_msg.Requestor)) {
+ trigger(Event:PUTX, in_msg.addr, tbe);
+ } else {
+ trigger(Event:PUTX_NotOwner, in_msg.addr, tbe);
+ }
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+  // Off-chip memory request/response has completed
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.addr, tbe);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.addr, tbe);
+ } else {
+          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.OriginalRequestorMachId;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WB_NACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(c_clearOwner, "c", desc="Clear the owner field") {
+ getDirectoryEntry(address).Owner.clear();
+ }
+
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+        // We send the entire data block and rely on the DMA controller
+        // to split it up if need be.
+        out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+
+ // we send the entire data block and rely on the dma controller
+ // to split it up if need be
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:ACK;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ getDirectoryEntry(address).Owner.clear();
+ getDirectoryEntry(address).Owner.add(in_msg.Requestor);
+ }
+ }
+
+ action(f_forwardRequest, "f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ APPEND_TRANSITION_COMMENT("Own: ");
+ APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.addr).Owner);
+ APPEND_TRANSITION_COMMENT("Req: ");
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination := getDirectoryEntry(in_msg.addr).Owner;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue(clockEdge());
+ }
+
+ action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.PhysicalAddress := in_msg.PhysicalAddress;
+ tbe.Len := in_msg.Len;
+ tbe.DmaRequestor := in_msg.Requestor;
+ }
+ }
+
+ action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DmaRequestor := in_msg.Requestor;
+ }
+ }
+
+ action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
+ peek(requestQueue_in, RequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(z_recycleRequestQueue, "z", desc="recycle request queue") {
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue partial off-chip writeback from DMA request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ queueMemoryWritePartial(in_msg.Requestor, address,
+ to_memory_controller_latency, in_msg.DataBlk,
+ in_msg.Len);
+ }
+ }
+
+  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue partial off-chip writeback from TBE") {
+ peek(requestQueue_in, RequestMsg) {
+ queueMemoryWritePartial(in_msg.Requestor, address,
+ to_memory_controller_latency, tbe.DataBlk,
+ tbe.Len);
+ }
+ }
+
+ action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
+ peek(requestQueue_in, RequestMsg) {
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ // TRANSITIONS
+ transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
+ z_recycleRequestQueue;
+ }
+
+ transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner} ) {
+ z_recycleRequestQueue;
+ }
+
+ transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE} ) {
+ y_recycleDMARequestQueue;
+ }
+
+
+ transition(I, GETX, IM) {
+ v_allocateTBEFromRequestNet;
+ qf_queueMemoryFetchRequest;
+ e_ownerIsRequestor;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(IM, Memory_Data, M) {
+ d_sendData;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+
+ transition(I, DMA_READ, ID) {
+ r_allocateTbeForDmaRead;
+ qf_queueMemoryFetchRequestDMA;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(ID, Memory_Data, I) {
+ dr_sendDMAData;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(I, DMA_WRITE, ID_W) {
+ v_allocateTBE;
+ qw_queueMemoryWBRequest_partial;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(ID_W, Memory_Ack, I) {
+ da_sendDMAAck;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(M, DMA_READ, M_DRD) {
+ v_allocateTBE;
+ inv_sendCacheInvalidate;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(M_DRD, PUTX, M_DRDI) {
+ drp_sendDMAData;
+ c_clearOwner;
+ l_queueMemoryWBRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M_DRDI, Memory_Ack, I) {
+ l_sendWriteBackAck;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+
+ transition(M, DMA_WRITE, M_DWR) {
+ v_allocateTBE;
+ inv_sendCacheInvalidate;
+ p_popIncomingDMARequestQueue;
+ }
+
+ transition(M_DWR, PUTX, M_DWRI) {
+ qw_queueMemoryWBRequest_partialTBE;
+ c_clearOwner;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M_DWRI, Memory_Ack, I) {
+ l_sendWriteBackAck;
+ da_sendDMAAck;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(M, GETX, M) {
+ f_forwardRequest;
+ e_ownerIsRequestor;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTX, MI) {
+ c_clearOwner;
+ v_allocateTBEFromRequestNet;
+ l_queueMemoryWBRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(MI, Memory_Ack, I) {
+ l_sendWriteBackAck;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(M, PUTX_NotOwner, M) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, PUTX_NotOwner, I) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+}
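+
+// For orientation, one worked flow read directly off the transitions
+// above (a reading aid only, not additional protocol code). A DMA write
+// that hits a line cached in state M proceeds as:
+//
+//   M      --DMA_WRITE--> M_DWR   v_allocateTBE; inv_sendCacheInvalidate;
+//                                 p_popIncomingDMARequestQueue
+//   M_DWR  --PUTX-------> M_DWRI  qw_queueMemoryWBRequest_partialTBE;
+//                                 c_clearOwner; i_popIncomingRequestQueue
+//   M_DWRI --Memory_Ack-> I       l_sendWriteBackAck; da_sendDMAAck;
+//                                 w_deallocateTBE; l_popMemQueue
+//
+// That is: the owning cache is invalidated, the buffered DMA data is
+// written (partially, using the TBE's Len) to memory once the owner's
+// PUTX arrives, and the DMA ack goes out only after memory acknowledges
+// the write.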
--- /dev/null
+/*
+ * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
+ * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:DMA, "DMA Controller")
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
+
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ vnet_type="request";
+ MessageBuffer * mandatoryQueue;
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, AccessPermission:Invalid, desc="Ready to accept a new request";
+    BUSY_RD, AccessPermission:Busy, desc="Busy: processing a DMA read";
+    BUSY_WR, AccessPermission:Busy, desc="Busy: processing a DMA write";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ Ack, desc="DMA write to memory completed";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+
+ TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else {
+ return State:READY;
+ }
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("DMA does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("DMA does not support functional write.");
+ }
+
+ out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
+ trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
+      peek(dmaResponseQueue_in, DMAResponseMsg) {
+ if (in_msg.Type == DMAResponseType:ACK) {
+ trigger(Event:Ack, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == DMAResponseType:DATA) {
+ trigger(Event:Data, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.LineAddress := in_msg.LineAddress;
+ out_msg.Type := DMARequestType:READ;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.LineAddress := in_msg.LineAddress;
+ out_msg.Type := DMARequestType:WRITE;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
+ dma_sequencer.ackCallback(address);
+ }
+
+ action(d_dataCallback, "d", desc="Write data to dma sequencer") {
+ dma_sequencer.dataCallback(tbe.DataBlk, address);
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ assert(is_valid(tbe));
+    peek(dmaResponseQueue_in, DMAResponseMsg) {
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+  action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue(clockEdge());
+ }
+
+ action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
+ stall_and_wait(dmaRequestQueue_in, address);
+ }
+
+ action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
+
+ transition(READY, ReadRequest, BUSY_RD) {
+ v_allocateTBE;
+ s_sendReadRequest;
+ p_popRequestQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ v_allocateTBE;
+ s_sendWriteRequest;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Data, READY) {
+ t_updateTBEData;
+ d_dataCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition(BUSY_WR, Ack, READY) {
+ a_ackCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
+ zz_stallAndWaitRequestQueue;
+ }
+
+}
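+
+// A contrast worth noting against the directory above: this controller
+// serializes requests with stall_and_wait plus an explicit wake-up,
+// while the directory busy-polls blocked requests with recycle(). Read
+// off the transitions above, a back-to-back read/write pair behaves as:
+//
+//   READY   --ReadRequest-->  BUSY_RD  (TBE allocated, READ sent to dir)
+//   BUSY_RD --WriteRequest--> stalled  (zz_stallAndWaitRequestQueue)
+//   BUSY_RD --Data-->         READY    (dataCallback; wakeUpAllBuffers
+//                                       reschedules the stalled write)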
--- /dev/null
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETS, desc="Get Shared";
+ PUTX, desc="Put eXclusive";
+ WB_ACK, desc="Writeback ack";
+ WB_NACK, desc="Writeback neg. ack";
+ INV, desc="Invalidation";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+ ACK, desc="ACKnowledgment, responder doesn't have a copy";
+ DATA, desc="Data";
+ DATA_EXCLUSIVE_CLEAN, desc="Data, no other processor has a copy, data is clean";
+ DATA_EXCLUSIVE_DIRTY, desc="Data, no other processor has a copy, data is dirty";
+ UNBLOCK, desc="Unblock";
+ UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
+ WRITEBACK_CLEAN, desc="Clean writeback (no data)";
+ WRITEBACK_DIRTY, desc="Dirty writeback (contains data)";
+ WRITEBACK, desc="Generic writeback (contains data)";
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ // Valid data block is only present in PUTX messages
+ if (Type == CoherenceRequestType:PUTX) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should read
+ // data block from only those messages that contain valid data
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+// ResponseMsg (and also unblock requests)
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="Node who sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+    // A check on the message type should appear here so that only
+    // messages that actually carry data are read; see the sketch after
+    // this structure.
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should read
+ // data block from only those messages that contain valid data
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
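+
+// A minimal sketch of the type check the comment above asks for -- an
+// assumption about the intended fix, not code that ships with this
+// protocol. It would mirror the PUTX guard in RequestMsg by gating on
+// the response types whose descriptions say they carry data:
+//
+//   bool functionalRead(Packet *pkt) {
+//     if (Type == CoherenceResponseType:DATA ||
+//         Type == CoherenceResponseType:DATA_EXCLUSIVE_CLEAN ||
+//         Type == CoherenceResponseType:DATA_EXCLUSIVE_DIRTY ||
+//         Type == CoherenceResponseType:WRITEBACK_DIRTY ||
+//         Type == CoherenceResponseType:WRITEBACK) {
+//       return testAndRead(addr, DataBlk, pkt);
+//     }
+//     return false;
+//   }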
+
+enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
+ READ, desc="Memory Read";
+ WRITE, desc="Memory Write";
+ NULL, desc="Invalid";
+}
+
+enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
+  DATA, desc="Data response to a DMA read";
+  ACK, desc="Ack for a completed DMA write";
+ NULL, desc="Invalid";
+}
+
+structure(DMARequestMsg, desc="...", interface="Message") {
+ DMARequestType Type, desc="Request type (read/write)";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ Addr LineAddress, desc="Line address for this request";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ int Len, desc="The length of the request";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ return testAndRead(LineAddress, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(LineAddress, DataBlk, pkt);
+ }
+}
+
+structure(DMAResponseMsg, desc="...", interface="Message") {
+ DMAResponseType Type, desc="Response type (DATA/ACK)";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ Addr LineAddress, desc="Line address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ return testAndRead(LineAddress, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(LineAddress, DataBlk, pkt);
+ }
+}
--- /dev/null
+protocol "MI_example";
+include "RubySlicc_interfaces.slicc";
+include "MI_example-msg.sm";
+include "MI_example-cache.sm";
+include "MI_example-dir.sm";
+include "MI_example-dma.sm";
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:CorePair, "CP-like Core Coherence")
+ : Sequencer * sequencer;
+ Sequencer * sequencer1;
+ CacheMemory * L1Icache;
+ CacheMemory * L1D0cache;
+ CacheMemory * L1D1cache;
+    CacheMemory * L2cache; // functional memory accesses look in this CacheMemory
+ bool send_evictions := "False";
+ Cycles issue_latency := 5; // time to send data down to NB
+ Cycles l2_hit_latency := 18;
+
+ // BEGIN Core Buffers
+
+ // To the Network
+ MessageBuffer * requestFromCore, network="To", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseFromCore, network="To", virtual_network="2", vnet_type="response";
+ MessageBuffer * unblockFromCore, network="To", virtual_network="4", vnet_type="unblock";
+
+ // From the Network
+ MessageBuffer * probeToCore, network="From", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseToCore, network="From", virtual_network="2", vnet_type="response";
+
+ MessageBuffer * mandatoryQueue;
+
+ MessageBuffer * triggerQueue, ordered="true";
+
+ // END Core Buffers
+
+{
+ // BEGIN STATES
+ state_declaration(State, desc="Cache states", default="CorePair_State_I") {
+
+ // Base States
+ I, AccessPermission:Invalid, desc="Invalid";
+ S, AccessPermission:Read_Only, desc="Shared";
+ E0, AccessPermission:Read_Write, desc="Exclusive with Cluster 0 ownership";
+ E1, AccessPermission:Read_Write, desc="Exclusive with Cluster 1 ownership";
+ Es, AccessPermission:Read_Write, desc="Exclusive in core";
+ O, AccessPermission:Read_Only, desc="Owner state in core, both clusters and other cores may be sharing line";
+ Ms, AccessPermission:Read_Write, desc="Modified in core, both clusters may be sharing line";
+    M0, AccessPermission:Read_Write, desc="Modified with cluster 0 ownership";
+    M1, AccessPermission:Read_Write, desc="Modified with cluster 1 ownership";
+
+ // Transient States
+ I_M0, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
+ I_M1, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
+ I_M0M1, AccessPermission:Busy, desc="Was in I_M0, got a store request from other cluster as well";
+ I_M1M0, AccessPermission:Busy, desc="Was in I_M1, got a store request from other cluster as well";
+ I_M0Ms, AccessPermission:Busy, desc="Was in I_M0, got a load request from other cluster as well";
+ I_M1Ms, AccessPermission:Busy, desc="Was in I_M1, got a load request from other cluster as well";
+ I_E0S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
+ I_E1S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
+ I_ES, AccessPermission:Busy, desc="S_F got hit by invalidating probe, RdBlk response needs to go to both clusters";
+
+    IF_E0S, AccessPermission:Busy, desc="hit by Probe Invalidate; now effectively I_E0S but still expecting an L2_to_L1D0 trigger, which is dropped on receipt";
+    IF_E1S, AccessPermission:Busy, desc="hit by Probe Invalidate; now effectively I_E1S but still expecting an L2_to_L1D1 trigger, which is dropped on receipt";
+ IF_ES, AccessPermission:Busy, desc="same, but waiting for two fills";
+ IF0_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
+ IF1_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
+ F_S0, AccessPermission:Busy, desc="same, but going to S0 when trigger received";
+ F_S1, AccessPermission:Busy, desc="same, but going to S1 when trigger received";
+
+ ES_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for clean writeback ack";
+ MO_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for dirty writeback ack";
+ MO_S0, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
+ MO_S1, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
+ S_F0, AccessPermission:Read_Only, desc="Shared, filling L1";
+ S_F1, AccessPermission:Read_Only, desc="Shared, filling L1";
+ S_F, AccessPermission:Read_Only, desc="Shared, filling L1";
+ O_F0, AccessPermission:Read_Only, desc="Owned, filling L1";
+ O_F1, AccessPermission:Read_Only, desc="Owned, filling L1";
+ O_F, AccessPermission:Read_Only, desc="Owned, filling L1";
+ Si_F0, AccessPermission:Read_Only, desc="Shared, filling icache";
+ Si_F1, AccessPermission:Read_Only, desc="Shared, filling icache";
+ S_M0, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
+ S_M1, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
+    O_M0, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
+    O_M1, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
+ S0, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 0, waiting for response";
+ S1, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 1, waiting for response";
+
+    Es_F0, AccessPermission:Read_Write, desc="Es, cluster 0 read, filling";
+    Es_F1, AccessPermission:Read_Write, desc="Es, cluster 1 read, filling";
+ Es_F, AccessPermission:Read_Write, desc="Es, other cluster read, filling";
+ E0_F, AccessPermission:Read_Write, desc="E0, cluster read, filling";
+ E1_F, AccessPermission:Read_Write, desc="...";
+ E0_Es, AccessPermission:Read_Write, desc="...";
+ E1_Es, AccessPermission:Read_Write, desc="...";
+ Ms_F0, AccessPermission:Read_Write, desc="...";
+ Ms_F1, AccessPermission:Read_Write, desc="...";
+ Ms_F, AccessPermission:Read_Write, desc="...";
+ M0_F, AccessPermission:Read_Write, desc="...";
+ M0_Ms, AccessPermission:Read_Write, desc="...";
+ M1_F, AccessPermission:Read_Write, desc="...";
+ M1_Ms, AccessPermission:Read_Write, desc="...";
+
+    I_C, AccessPermission:Invalid, desc="Invalid, but waiting for WBAck from NB for canceled writeback";
+    S0_C, AccessPermission:Busy, desc="MO_S0 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
+    S1_C, AccessPermission:Busy, desc="MO_S1 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
+ S_C, AccessPermission:Busy, desc="S*_C got NB_AckS, still waiting for WBAck";
+
+ } // END STATES
+
+ // BEGIN EVENTS
+ enumeration(Event, desc="CP Events") {
+ // CP Initiated events
+ C0_Load_L1miss, desc="Cluster 0 load, L1 missed";
+ C0_Load_L1hit, desc="Cluster 0 load, L1 hit";
+ C1_Load_L1miss, desc="Cluster 1 load L1 missed";
+ C1_Load_L1hit, desc="Cluster 1 load L1 hit";
+    Ifetch0_L1hit, desc="Cluster 0 instruction fetch, hit in the L1";
+    Ifetch1_L1hit, desc="Cluster 1 instruction fetch, hit in the L1";
+    Ifetch0_L1miss, desc="Cluster 0 instruction fetch, missed in the L1";
+    Ifetch1_L1miss, desc="Cluster 1 instruction fetch, missed in the L1";
+ C0_Store_L1miss, desc="Cluster 0 store missed in L1";
+ C0_Store_L1hit, desc="Cluster 0 store hit in L1";
+ C1_Store_L1miss, desc="Cluster 1 store missed in L1";
+ C1_Store_L1hit, desc="Cluster 1 store hit in L1";
+ // NB Initiated events
+    NB_AckS, desc="NB Ack to Core Request, line granted Shared";
+    NB_AckM, desc="NB Ack to Core Request, line granted Modified";
+    NB_AckE, desc="NB Ack to Core Request, line granted Exclusive";
+
+ NB_AckWB, desc="NB Ack for writeback";
+
+    // Memory system initiated events
+ L1I_Repl, desc="Replace address from L1I"; // Presumed clean
+ L1D0_Repl, desc="Replace address from L1D0"; // Presumed clean
+ L1D1_Repl, desc="Replace address from L1D1"; // Presumed clean
+ L2_Repl, desc="Replace address from L2";
+
+ L2_to_L1D0, desc="L1 fill from L2";
+ L2_to_L1D1, desc="L1 fill from L2";
+ L2_to_L1I, desc="L1 fill from L2";
+
+ // Probe Events
+ PrbInvData, desc="probe, return O or M data";
+ PrbInv, desc="probe, no need for data";
+ PrbShrData, desc="probe downgrade, return O or M data";
+
+ } // END EVENTS
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+    L1D0DataArrayRead, desc="Read the data array";
+    L1D0DataArrayWrite, desc="Write the data array";
+    L1D0TagArrayRead, desc="Read the tag array";
+    L1D0TagArrayWrite, desc="Write the tag array";
+    L1D1DataArrayRead, desc="Read the data array";
+    L1D1DataArrayWrite, desc="Write the data array";
+    L1D1TagArrayRead, desc="Read the tag array";
+    L1D1TagArrayWrite, desc="Write the tag array";
+    L1IDataArrayRead, desc="Read the data array";
+    L1IDataArrayWrite, desc="Write the data array";
+    L1ITagArrayRead, desc="Read the tag array";
+    L1ITagArrayWrite, desc="Write the tag array";
+    L2DataArrayRead, desc="Read the data array";
+    L2DataArrayWrite, desc="Write the data array";
+    L2TagArrayRead, desc="Read the tag array";
+    L2TagArrayWrite, desc="Write the tag array";
+ }
+
+
+ // BEGIN STRUCTURE DEFINITIONS
+
+
+ // Cache Entry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (diff than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Shared, desc="Victim hit by shared probe";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<CorePair_TBE>", constructor="m_number_of_TBEs";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // END STRUCTURE DEFINITIONS
+
+ // BEGIN INTERNAL FUNCTIONS
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ bool addressInCore(Addr addr) {
+    return (L2cache.isTagPresent(addr) || L1Icache.isTagPresent(addr) ||
+            L1D0cache.isTagPresent(addr) || L1D1cache.isTagPresent(addr));
+ }
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
+ return L2cache_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ Entry getL1CacheEntry(Addr addr, int cluster), return_by_pointer="yes" {
+ if (cluster == 0) {
+ Entry L1D0_entry := static_cast(Entry, "pointer", L1D0cache.lookup(addr));
+ return L1D0_entry;
+ } else {
+ Entry L1D1_entry := static_cast(Entry, "pointer", L1D1cache.lookup(addr));
+ return L1D1_entry;
+ }
+ }
+
+ Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry c_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ return c_entry;
+ }
+
+ bool presentOrAvail2(Addr addr) {
+ return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
+ }
+
+ bool presentOrAvailI(Addr addr) {
+ return L1Icache.isTagPresent(addr) || L1Icache.cacheAvail(addr);
+ }
+
+ bool presentOrAvailD0(Addr addr) {
+ return L1D0cache.isTagPresent(addr) || L1D0cache.cacheAvail(addr);
+ }
+
+ bool presentOrAvailD1(Addr addr) {
+ return L1D1cache.isTagPresent(addr) || L1D1cache.cacheAvail(addr);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return CorePair_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return CorePair_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(CorePair_State_to_permission(state));
+ }
+ }
+
+ MachineType testAndClearLocalHit(Entry cache_entry) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.FromL2) {
+ cache_entry.FromL2 := false;
+ return MachineType:L2Cache;
+ } else {
+ return MachineType:L1Cache;
+ }
+ }
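+
+  // FromL2 marks a block that was just filled from the L2: the first
+  // local hit after the fill is credited to the L2 (clearing the flag),
+  // and subsequent hits to the L1.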
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L1D0DataArrayRead) {
+ L1D0cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L1D0DataArrayWrite) {
+ L1D0cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L1D0TagArrayRead) {
+ L1D0cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L1D0TagArrayWrite) {
+ L1D0cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:L1D1DataArrayRead) {
+ L1D1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L1D1DataArrayWrite) {
+ L1D1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L1D1TagArrayRead) {
+ L1D1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L1D1TagArrayWrite) {
+ L1D1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:L1IDataArrayRead) {
+ L1Icache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L1IDataArrayWrite) {
+ L1Icache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L1ITagArrayRead) {
+ L1Icache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L1ITagArrayWrite) {
+ L1Icache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:L2DataArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L2DataArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L2TagArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L2TagArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L2DataArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L2DataArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L2TagArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L2TagArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D0DataArrayRead) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D0DataArrayWrite) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D0TagArrayRead) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D0TagArrayWrite) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D1DataArrayRead) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D1DataArrayWrite) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D1TagArrayRead) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D1TagArrayWrite) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1IDataArrayRead) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1IDataArrayWrite) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1ITagArrayRead) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1ITagArrayWrite) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ return true;
+ }
+ }
+
+ // END INTERNAL FUNCTIONS
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, CPURequestMsg, requestFromCore);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCore);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+ out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
+
+ // ** IN_PORTS **
+
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, block_on="addr") {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == TriggerType:L2_to_L1) {
+ if (in_msg.Dest == CacheId:L1I) {
+ trigger(Event:L2_to_L1I, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Dest == CacheId:L1D0) {
+ trigger(Event:L2_to_L1D0, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Dest == CacheId:L1D1) {
+ trigger(Event:L2_to_L1D1, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("unexpected trigger dest");
+ }
+ }
+ }
+ }
+ }
+
+
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeToCore) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg, block_on="addr") {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ assert(in_msg.ReturnData);
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+
+ // ResponseNetwork
+ in_port(responseToCore_in, ResponseMsg, responseToCore) {
+ if (responseToCore_in.isReady(clockEdge())) {
+ peek(responseToCore_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == CoherenceResponseType:NBSysResp) {
+ if (in_msg.State == CoherenceState:Modified) {
+ trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.State == CoherenceState:Shared) {
+ trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.State == CoherenceState:Exclusive) {
+ trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ // Nothing from the Unblock Network
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // FETCH ACCESS
+
+ if (L1Icache.isTagPresent(in_msg.LineAddress)) {
+ if (mod(in_msg.contextId, 2) == 0) {
+ trigger(Event:Ifetch0_L1hit, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ trigger(Event:Ifetch1_L1hit, in_msg.LineAddress, cache_entry, tbe);
+ }
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ if (presentOrAvailI(in_msg.LineAddress)) {
+ if (mod(in_msg.contextId, 2) == 0) {
+ trigger(Event:Ifetch0_L1miss, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ trigger(Event:Ifetch1_L1miss, in_msg.LineAddress, cache_entry,
+ tbe);
+ }
+ } else {
+ // Check if the line we want to evict is not locked
+ Addr victim := L1Icache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, victim);
+ trigger(Event:L1I_Repl, victim,
+ getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else { // Not present or avail in L2
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ } else {
+ // DATA ACCESS
+ if (mod(in_msg.contextId, 2) == 1) {
+ if (L1D1cache.isTagPresent(in_msg.LineAddress)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C1_Load_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ // Stores must write through, make sure L2 avail.
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ trigger(Event:C1_Store_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ if (presentOrAvailD1(in_msg.LineAddress)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C1_Load_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:C1_Store_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ }
+ } else {
+ // Check if the line we want to evict is not locked
+ Addr victim := L1D1cache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, victim);
+ trigger(Event:L1D1_Repl, victim,
+ getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else { // not present or avail in L2
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ } else {
+ Entry L1D0cache_entry := getL1CacheEntry(in_msg.LineAddress, 0);
+ if (is_valid(L1D0cache_entry)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C0_Load_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ trigger(Event:C0_Store_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ if (presentOrAvailD0(in_msg.LineAddress)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C0_Load_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:C0_Store_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ }
+ } else {
+ // Check if the line we want to evict is not locked
+ Addr victim := L1D0cache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, victim);
+ trigger(Event:L1D0_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
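+
+  // The cluster routing rule used throughout the port above, spelled out
+  // as a hypothetical helper for clarity (the protocol inlines the test
+  // instead of defining this function):
+  //
+  //   int clusterOf(int contextId) {
+  //     return mod(contextId, 2); // even -> cluster 0, odd -> cluster 1
+  //   }
+  //
+  // Even contexts are serviced by sequencer/L1D0, odd contexts by
+  // sequencer1/L1D1.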
+
+
+ // ACTIONS
+ action(ii_invIcache, "ii", desc="invalidate iCache") {
+ if (L1Icache.isTagPresent(address)) {
+ L1Icache.deallocate(address);
+ }
+ }
+
+ action(i0_invCluster, "i0", desc="invalidate cluster 0") {
+ if (L1D0cache.isTagPresent(address)) {
+ L1D0cache.deallocate(address);
+ }
+ }
+
+ action(i1_invCluster, "i1", desc="invalidate cluster 1") {
+ if (L1D1cache.isTagPresent(address)) {
+ L1D1cache.deallocate(address);
+ }
+ }
+
+ action(ib_invBothClusters, "ib", desc="invalidate both clusters") {
+ if (L1D0cache.isTagPresent(address)) {
+ L1D0cache.deallocate(address);
+ }
+ if (L1D1cache.isTagPresent(address)) {
+ L1D1cache.deallocate(address);
+ }
+ }
+
+ action(i2_invL2, "i2", desc="invalidate L2") {
+ if(is_valid(cache_entry)) {
+ L2cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(mru_setMRU, "mru", desc="Update LRU state") {
+ L2cache.setMRU(address);
+ }
+
+ action(mruD1_setD1cacheMRU, "mruD1", desc="Update LRU state") {
+ L1D1cache.setMRU(address);
+ }
+
+ action(mruD0_setD0cacheMRU, "mruD0", desc="Update LRU state") {
+ L1D0cache.setMRU(address);
+ }
+
+ action(mruI_setIcacheMRU, "mruI", desc="Update LRU state") {
+ L1Icache.setMRU(address);
+ }
+
+ action(n_issueRdBlk, "n", desc="Issue RdBlk") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+      DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkM;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(vd_victim, "vd", desc="Victimize M/O L2 Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ assert(is_valid(cache_entry));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:O) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ action(vc_victim, "vc", desc="Victimize E/S L2 Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:S) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ action(a0_allocateL1D, "a0", desc="Allocate L1D0 Block") {
+ if (L1D0cache.isTagPresent(address) == false) {
+ L1D0cache.allocateVoid(address, new Entry);
+ }
+ }
+
+ action(a1_allocateL1D, "a1", desc="Allocate L1D1 Block") {
+ if (L1D1cache.isTagPresent(address) == false) {
+ L1D1cache.allocateVoid(address, new Entry);
+ }
+ }
+
+ action(ai_allocateL1I, "ai", desc="Allocate L1I Block") {
+ if (L1Icache.isTagPresent(address) == false) {
+ L1Icache.allocateVoid(address, new Entry);
+ }
+ }
+
+ action(a2_allocateL2, "a2", desc="Allocate L2 Block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.Shared := false;
+ }
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
+ responseToCore_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="Pop Trigger Queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+  action(il0_loadDone, "il0", desc="Cluster 0 i-load done") {
+ Entry entry := getICacheEntry(address);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+  action(il1_loadDone, "il1", desc="Cluster 1 i-load done") {
+ Entry entry := getICacheEntry(address);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+ action(l0_loadDone, "l0", desc="Cluster 0 load done") {
+ Entry entry := getL1CacheEntry(address, 0);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+ action(l1_loadDone, "l1", desc="Cluster 1 load done") {
+ Entry entry := getL1CacheEntry(address, 1);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+  action(xl0_loadDone, "xl0", desc="Cluster 0 load done from external response") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ DPRINTF(ProtocolTrace, "CP Load Done 0 -- address %s, data: %s\n", address, l2entry.DataBlk);
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(xl1_loadDone, "xl1", desc="Cluster 1 load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(xi0_loadDone, "xi0", desc="Cluster 0 i-load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(xi1_loadDone, "xi1", desc="Cluster 1 i-load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(s0_storeDone, "s0", desc="Cluster 0 store done") {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ sequencer.writeCallback(address,
+ cache_entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ cache_entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+
+ action(s1_storeDone, "s1", desc="Cluster 1 store done") {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ sequencer1.writeCallback(address,
+ cache_entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ cache_entry.Dirty := true;
+ entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+
+ action(xs0_storeDone, "xs0", desc="Cluster 0 store done") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ sequencer.writeCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ cache_entry.Dirty := true;
+ entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+ }
+
+ action(xs1_storeDone, "xs1", desc="Cluster 1 store done") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ sequencer1.writeCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ cache_entry.Dirty := true;
+ entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+ }
+
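+  // Eviction notifications let the attached CPU model react to lost
+  // coherence permission (e.g. squashing speculative loads); they are only
+  // sent when the send_evictions parameter is set.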
+ action(forward_eviction_to_cpu0, "fec0", desc="sends eviction information to processor0") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(forward_eviction_to_cpu1, "fec1", desc="sends eviction information to processor1") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
+ sequencer1.evictionCallback(address);
+ }
+ }
+
+ action(ci_copyL2ToL1, "ci", desc="copy L2 data to L1") {
+ Entry entry := getICacheEntry(address);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.Dirty := cache_entry.Dirty;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.FromL2 := true;
+ }
+
+ action(c0_copyL2ToL1, "c0", desc="copy L2 data to L1") {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.Dirty := cache_entry.Dirty;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.FromL2 := true;
+ }
+
+ action(c1_copyL2ToL1, "c1", desc="copy L2 data to L1") {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.Dirty := cache_entry.Dirty;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.FromL2 := true;
+ }
+
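+  // L2-to-L1 fills are not instantaneous: these actions enqueue a trigger
+  // message delayed by l2_hit_latency, and the corresponding L2_to_L1*
+  // trigger event later copies the block into the selected L1.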
+ action(fi_L2ToL1, "fi", desc="L2 to L1 inst fill") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ out_msg.Dest := CacheId:L1I;
+ }
+ }
+
+ action(f0_L2ToL1, "f0", desc="L2 to L1 data fill") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ out_msg.Dest := CacheId:L1D0;
+ }
+ }
+
+ action(f1_L2ToL1, "f1", desc="L2 to L1 data fill") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ out_msg.Dest := CacheId:L1D1;
+ }
+ }
+
+ action(wi_writeIcache, "wi", desc="write data to icache (and l2)") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getICacheEntry(address);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.DataBlk := in_msg.DataBlk;
+ entry.Dirty := in_msg.Dirty;
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(w0_writeDcache, "w0", desc="write data to dcache 0 (and l2)") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ DPRINTF(ProtocolTrace, "CP writeD0: address %s, data: %s\n", address, in_msg.DataBlk);
+ entry.DataBlk := in_msg.DataBlk;
+ entry.Dirty := in_msg.Dirty;
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(w1_writeDcache, "w1", desc="write data to dcache 1 (and l2)") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.DataBlk := in_msg.DataBlk;
+ entry.Dirty := in_msg.Dirty;
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
+ peek(responseToCore_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:StaleNotif;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
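+  // Writebacks send data from the TBE (captured when the TBE was allocated)
+  // so the cache entry itself can be reused while the victim drains.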
+ action(wb_data, "wb", desc="write back data") {
+ peek(responseToCore_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Ntsl := true;
+ out_msg.Hit := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ph_sendProbeResponseHit, "ph", desc="send probe ack PrbShrData, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ assert(addressInCore(address) || is_valid(tbe));
+      out_msg.Dirty := false; // Dirty is only true when data is sent back
+ out_msg.Hit := true;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pb_sendProbeResponseBackprobe, "pb", desc="send probe ack PrbShrData, no data, check for L1 residence") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ if (addressInCore(address)) {
+ out_msg.Hit := true;
+ } else {
+ out_msg.Hit := false;
+ }
+ out_msg.Dirty := false; // not sending back data, so def. not dirty
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ assert(tbe.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(s_setSharedFlip, "s", desc="hit by shared probe, status may be different") {
+ assert(is_valid(tbe));
+ tbe.Shared := true;
+ }
+
+ action(uu_sendUnblock, "uu", desc="state changed, unblock") {
+ enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(l2m_profileMiss, "l2m", desc="l2m miss profile") {
+ ++L2cache.demand_misses;
+ }
+
+ action(l10m_profileMiss, "l10m", desc="l10m miss profile") {
+ ++L1D0cache.demand_misses;
+ }
+
+ action(l11m_profileMiss, "l11m", desc="l11m miss profile") {
+ ++L1D1cache.demand_misses;
+ }
+
+  action(l1im_profileMiss, "l1im", desc="l1im miss profile") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
+ probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(xx_recycleResponseQueue, "xx", desc="recycle response queue") {
+ responseToCore_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ // END ACTIONS
+
+ // BEGIN TRANSITIONS
+
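+  // Convention: the second brace block on each transition lists the tag and
+  // data arrays it accesses; these annotations drive the controller's
+  // resource-availability (array port) checks.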
+ // transitions from base
+ transition(I, C0_Load_L1miss, I_E0S) {L1D0TagArrayRead, L2TagArrayRead} {
+ // track misses, if implemented
+ // since in I state, L2 miss as well
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ a2_allocateL2;
+ i1_invCluster;
+ ii_invIcache;
+ n_issueRdBlk;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, C1_Load_L1miss, I_E1S) {L1D1TagArrayRead, L2TagArrayRead} {
+ // track misses, if implemented
+ // since in I state, L2 miss as well
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ a2_allocateL2;
+ i0_invCluster;
+ ii_invIcache;
+ n_issueRdBlk;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch0_L1miss, S0) {L1ITagArrayRead,L2TagArrayRead} {
+ // track misses, if implemented
+ // L2 miss as well
+ l2m_profileMiss;
+ l1im_profileMiss;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
+ // track misses, if implemented
+ // L2 miss as well
+ l2m_profileMiss;
+ l1im_profileMiss;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, C0_Store_L1miss, I_M0) {L1D0TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ a2_allocateL2;
+ i1_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+  transition(I, C1_Store_L1miss, I_M1) {L1D1TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ a2_allocateL2;
+ i0_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, C0_Load_L1miss, S_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, C1_Load_L1miss, S_F1) {L1D1TagArrayRead,L2TagArrayRead, L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, Ifetch0_L1miss, Si_F0) {L1ITagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l1im_profileMiss;
+ ai_allocateL1I;
+ fi_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, Ifetch1_L1miss, Si_F1) {L1ITagArrayRead,L2TagArrayRead, L2DataArrayRead} {
+ l1im_profileMiss;
+ ai_allocateL1I;
+ fi_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition({S}, {C0_Store_L1hit, C0_Store_L1miss}, S_M0) {L1D0TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ mruD0_setD0cacheMRU;
+ i1_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition({S}, {C1_Store_L1hit, C1_Store_L1miss}, S_M1) {L1D1TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ mruD1_setD1cacheMRU;
+ i0_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, C0_Load_L1miss, Es_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
+ a0_allocateL1D;
+ l10m_profileMiss;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, C1_Load_L1miss, Es_F1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, Ifetch0_L1miss, S0) {L1ITagArrayRead, L1ITagArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+  // THESE SHOULD NOT BE INSTANTANEOUS, BUT ARE FOR NOW
+ transition(Es, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone; // instantaneous L1/L2 dirty - no writethrough delay
+ mruD0_setD0cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ mruD1_setD1cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, C0_Load_L1miss, E0_F) {L1D0TagArrayRead,L2TagArrayRead, L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, C1_Load_L1miss, E0_Es) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, Ifetch0_L1miss, S0) {L2TagArrayRead, L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i0_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i0_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a0_allocateL1D;
+ s0_storeDone;
+ mruD0_setD0cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+  transition(E0, C1_Store_L1miss, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, C1_Load_L1miss, E1_F) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, C0_Load_L1miss, E1_Es) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+    l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i1_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, Ifetch0_L1miss, S0) {L2TagArrayRead, L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i1_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite} {
+ a1_allocateL1D;
+ s1_storeDone;
+ mruD1_setD1cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, C0_Store_L1miss, M0) {L1D0TagArrayRead, L2TagArrayRead, L2TagArrayWrite, L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition({O}, {C0_Store_L1hit, C0_Store_L1miss}, O_M0) {L1D0TagArrayRead,L2TagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue CtoD
+ l10m_profileMiss;
+ a0_allocateL1D;
+ mruD0_setD0cacheMRU;
+ i1_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition({O}, {C1_Store_L1hit, C1_Store_L1miss}, O_M1) {L1D1TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l11m_profileMiss;
+ a1_allocateL1D;
+ mruD1_setD1cacheMRU;
+ i0_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(O, C0_Load_L1miss, O_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(O, C1_Load_L1miss, O_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, C0_Load_L1miss, Ms_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, C1_Load_L1miss, Ms_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms, M0, M1, O}, Ifetch0_L1miss, MO_S0) {L1ITagArrayRead, L2DataArrayRead, L2TagArrayRead} {
+ l2m_profileMiss; // permissions miss
+ l1im_profileMiss;
+ ai_allocateL1I;
+ t_allocateTBE;
+ ib_invBothClusters;
+ vd_victim;
+// i2_invL2;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms, M0, M1, O}, Ifetch1_L1miss, MO_S1) {L1ITagArrayRead, L2TagArrayRead, L2DataArrayRead } {
+ l2m_profileMiss; // permissions miss
+ l1im_profileMiss;
+ ai_allocateL1I;
+ t_allocateTBE;
+ ib_invBothClusters;
+ vd_victim;
+// i2_invL2;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone;
+ mruD0_setD0cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ mruD1_setD1cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(M0, C0_Load_L1miss, M0_F) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+  transition(M0, C1_Load_L1miss, M0_Ms) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(M0, {C0_Store_L1hit, C0_Store_L1miss}) {L1D0TagArrayRead,L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayRead} {
+ a0_allocateL1D;
+ s0_storeDone;
+ mruD0_setD0cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+  transition(M0, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2DataArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ mruD1_setD1cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(M1, C0_Load_L1miss, M1_Ms) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(M1, C1_Load_L1miss, M1_F) {L1D1TagArrayRead,L2TagArrayRead, L2DataArrayRead} {
+ a1_allocateL1D;
+ f1_L2ToL1;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(M1, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone;
+ mruD0_setD0cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+  transition(M1, {C1_Store_L1hit, C1_Store_L1miss}) {L1D1TagArrayRead, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite} {
+ a1_allocateL1D;
+ s1_storeDone;
+ mruD1_setD1cacheMRU;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ // end transitions from base
+
+ // Begin simple hit transitions
+ transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es,
+ Ms_F1, M0_Ms}, C0_Load_L1hit) {L1D0TagArrayRead, L1D0DataArrayRead} {
+ // track hits, if implemented
+ l0_loadDone;
+ mruD0_setD0cacheMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es,
+ Ms_F0, M1_Ms}, C1_Load_L1hit) {L1D1TagArrayRead, L1D1DataArrayRead} {
+ // track hits, if implemented
+ l1_loadDone;
+ mruD1_setD1cacheMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition({S, S_C, S_F0, S_F1, S_F}, Ifetch0_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
+ // track hits, if implemented
+ il0_loadDone;
+ mruI_setIcacheMRU;
+ p_popMandatoryQueue;
+ }
+
+  transition({S, S_C, S_F0, S_F1, S_F}, Ifetch1_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
+ // track hits, if implemented
+ il1_loadDone;
+ mruI_setIcacheMRU;
+ p_popMandatoryQueue;
+ }
+
+ // end simple hit transitions
+
+ // Transitions from transient states
+
+ // recycles
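+  // A recycled message is put back on its queue and retried recycle_latency
+  // cycles later, once the transient state has had a chance to resolve.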
+ transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
+ IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
+ E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, C0_Load_L1hit) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M1,
+ O_M1, S0, S1, I_C, S0_C, S1_C, S_C}, C0_Load_L1miss) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
+ IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
+ E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, C1_Load_L1hit) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M0,
+ O_M0, S0, S1, I_C, S0_C, S1_C, S_C}, C1_Load_L1miss) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, {Ifetch0_L1hit, Ifetch1_L1hit}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M0, I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES,
+ IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, ES_I, MO_I, S_F0, S_F1, S_F,
+ O_F0, O_F1, O_F, S_M0, S_M1, O_M0, O_M1, Es_F0, Es_F1, Es_F, E0_F,
+ E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, I_C,
+ S_C}, {Ifetch0_L1miss, Ifetch1_L1miss}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_E1S, IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F1, O_F1,
+ Si_F0, Si_F1, S_M1, O_M1, S0, S1, Es_F1, E1_F, E0_Es, Ms_F1, M0_Ms,
+ M1_F, I_C, S0_C, S1_C, S_C}, {C0_Store_L1miss}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+  transition({I_E0S, IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F0, O_F0,
+ Si_F0, Si_F1, S_M0, O_M0, S0, S1, Es_F0, E0_F, E1_Es, Ms_F0, M0_F,
+ M1_Ms, I_C, S0_C, S1_C, S_C}, {C1_Store_L1miss}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+  transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
+              IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1,
+              S_M0, O_M0, Es_F0, Es_F1, Es_F, E0_F, E0_Es, E1_Es, Ms_F0, Ms_F1,
+              Ms_F, M0_F, M0_Ms, M1_Ms}, {C0_Store_L1hit}) {} {
+    zz_recycleMandatoryQueue;
+  }
+
+ transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
+ IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1, S_M1,
+ O_M1, Es_F0, Es_F1, Es_F, E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F,
+ M0_Ms, M1_F, M1_Ms}, {C1_Store_L1hit}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
+ IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
+ E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, L1D0_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
+ IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
+ E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, L1D1_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, L1I_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+  transition({S_C, S0_C, S1_C, S0, S1, Si_F0, Si_F1, I_M0, I_M1, I_M0M1,
+              I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES, S_F0, S_F1, S_F,
+              O_F0, O_F1, O_F, S_M0, O_M0, S_M1, O_M1, Es_F0, Es_F1, Es_F,
+              E0_F, E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F,
+              M1_Ms, MO_S0, MO_S1, IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES,
+              F_S0, F_S1}, L2_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, {NB_AckS,
+ PrbInvData, PrbInv, PrbShrData}) {} {
+    yy_recycleProbeQueue; // these should resolve soon; recycling avoids adding more transient states
+ }
+
+ transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES}, NB_AckE) {} {
+    xx_recycleResponseQueue; // these should resolve soon; recycling avoids adding more transient states
+ }
+
+ transition({E0_Es, E1_F, Es_F1}, C0_Load_L1miss, Es_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S_F1, C0_Load_L1miss, S_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(O_F1, C0_Load_L1miss, O_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms_F1, M0_Ms, M1_F}, C0_Load_L1miss, Ms_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_M0, C1_Load_L1miss, I_M0Ms) {} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_M1, C0_Load_L1miss, I_M1Ms) {} {
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_M0, C1_Store_L1miss, I_M0M1) {} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_M1, C0_Store_L1miss, I_M1M0) {} {
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ mru_setMRU;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_E0S, C1_Load_L1miss, I_ES) {} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_E1S, C0_Load_L1miss, I_ES) {} {
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition({E1_Es, E0_F, Es_F0}, C1_Load_L1miss, Es_F) {L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S_F0, C1_Load_L1miss, S_F) {L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(O_F0, C1_Load_L1miss, O_F) {L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms_F0, M1_Ms, M0_F}, C1_Load_L1miss, Ms_F) { L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es, Ms_F1, M0_Ms}, L1D0_Repl) {L1D0TagArrayRead} {
+ i0_invCluster;
+ }
+
+ transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es, Ms_F0, M1_Ms}, L1D1_Repl) {L1D1TagArrayRead} {
+ i1_invCluster;
+ }
+
+ transition({S, S_C, S_F0, S_F1}, L1I_Repl) {L1ITagArrayRead} {
+ ii_invIcache;
+ }
+
+ transition({S, E0, E1, Es}, L2_Repl, ES_I) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead, L1D1TagArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ t_allocateTBE;
+ vc_victim;
+ ib_invBothClusters;
+ i2_invL2;
+ ii_invIcache;
+ }
+
+ transition({Ms, M0, M1, O}, L2_Repl, MO_I) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead, L1D1TagArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ t_allocateTBE;
+ vd_victim;
+ i2_invL2;
+ ib_invBothClusters; // nothing will happen for D0 on M1, vice versa
+ }
+
+  transition(S0, NB_AckS, S) {L1IDataArrayWrite, L1ITagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ wi_writeIcache;
+ xi0_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  transition(S1, NB_AckS, S) {L1IDataArrayWrite, L1ITagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ wi_writeIcache;
+ xi1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  transition(S0_C, NB_AckS, S_C) {L1IDataArrayWrite, L2DataArrayWrite} {
+ wi_writeIcache;
+ xi0_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  transition(S1_C, NB_AckS, S_C) {L1IDataArrayWrite, L2DataArrayWrite} {
+ wi_writeIcache;
+ xi1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M0, NB_AckM, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xs0_storeDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w1_writeDcache;
+ xs1_storeDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  // These M0->M1 handoffs should not be instantaneous, but are for now.
+ transition(I_M0M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xs0_storeDone;
+ uu_sendUnblock;
+ i0_invCluster;
+ s1_storeDone;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M1M0, NB_AckM, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w1_writeDcache;
+ xs1_storeDone;
+ uu_sendUnblock;
+ i1_invCluster;
+ s0_storeDone;
+ pr_popResponseQueue;
+ }
+
+  // The above should be more like this, with some latency to transfer to L1.
+ transition(I_M0Ms, NB_AckM, M0_Ms) {L1D0DataArrayWrite,L2DataArrayWrite} {
+ w0_writeDcache;
+ xs0_storeDone;
+ uu_sendUnblock;
+ f1_L2ToL1;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M1Ms, NB_AckM, M1_Ms) {L1D1DataArrayWrite, L2DataArrayWrite} {
+ w1_writeDcache;
+ xs1_storeDone;
+ uu_sendUnblock;
+ f0_L2ToL1;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E0S, NB_AckE, E0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xl0_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E1S, NB_AckE, E1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w1_writeDcache;
+ xl1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_ES, NB_AckE, Es) {L1D1DataArrayWrite, L1D1TagArrayWrite, L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite } {
+ w0_writeDcache;
+ xl0_loadDone;
+ w1_writeDcache;
+ xl1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E0S, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xl0_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E1S, NB_AckS, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
+ w1_writeDcache;
+ xl1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  transition(I_ES, NB_AckS, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
+ w0_writeDcache;
+ xl0_loadDone;
+ w1_writeDcache;
+ xl1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(S_F0, L2_to_L1D0, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_F1, L2_to_L1D1, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Si_F0, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ ci_copyL2ToL1;
+ mru_setMRU;
+ il0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Si_F1, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ ci_copyL2ToL1;
+ mru_setMRU;
+ il1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_F, L2_to_L1D0, S_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_F, L2_to_L1D1, S_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F0, L2_to_L1D0, O) { L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F1, L2_to_L1D1, O) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F, L2_to_L1D0, O_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F, L2_to_L1D1, O_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M1_F, L2_to_L1D1, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M0_F, L2_to_L1D0, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Ms_F0, L2_to_L1D0, Ms) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Ms_F1, L2_to_L1D1, Ms) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Ms_F, L2_to_L1D0, Ms_F1) {L1D0DataArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+  transition(Ms_F, L2_to_L1D1, Ms_F0) {L1D1DataArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M1_Ms, L2_to_L1D0, Ms) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M0_Ms, L2_to_L1D1, Ms) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F0, L2_to_L1D0, Es) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F1, L2_to_L1D1, Es) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F, L2_to_L1D0, Es_F1) {L2TagArrayRead, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F, L2_to_L1D1, Es_F0) {L2TagArrayRead, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E0_F, L2_to_L1D0, E0) {L2TagArrayRead, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E1_F, L2_to_L1D1, E1) {L2TagArrayRead, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E1_Es, L2_to_L1D0, Es) {L2TagArrayRead, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ mru_setMRU;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E0_Es, L2_to_L1D1, Es) {L2TagArrayRead, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ mru_setMRU;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_E0S, L2_to_L1D0, I_E0S) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_E1S, L2_to_L1D1, I_E1S) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_ES, L2_to_L1D0, IF1_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_ES, L2_to_L1D1, IF0_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF0_ES, L2_to_L1D0, I_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF1_ES, L2_to_L1D1, I_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(F_S0, L2_to_L1I, S0) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(F_S1, L2_to_L1I, S1) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition({S_M0, O_M0}, NB_AckM, M0) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ mru_setMRU;
+ xs0_storeDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition({S_M1, O_M1}, NB_AckM, M1) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ mru_setMRU;
+ xs1_storeDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(MO_I, NB_AckWB, I) {L2TagArrayWrite} {
+ wb_data;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(ES_I, NB_AckWB, I) {L2TagArrayWrite} {
+ wb_data;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(MO_S0, NB_AckWB, S0) {L2TagArrayWrite} {
+ wb_data;
+ i2_invL2;
+ a2_allocateL2;
+ d_deallocateTBE; // FOO
+ nS_issueRdBlkS;
+ pr_popResponseQueue;
+ }
+
+ transition(MO_S1, NB_AckWB, S1) {L2TagArrayWrite} {
+ wb_data;
+ i2_invL2;
+ a2_allocateL2;
+ d_deallocateTBE; // FOO
+ nS_issueRdBlkS;
+ pr_popResponseQueue;
+ }
+
+ // Writeback cancel "ack"
+ transition(I_C, NB_AckWB, I) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(S0_C, NB_AckWB, S0) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ pr_popResponseQueue;
+ }
+
+ transition(S1_C, NB_AckWB, S1) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ pr_popResponseQueue;
+ }
+
+ transition(S_C, NB_AckWB, S) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ pr_popResponseQueue;
+ }
+
+ // Begin Probe Transitions
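+  // Probe-response actions used below: pi/pim ack an invalidation with no
+  // data, ph acks a hit with no data, pb acks based on whether an L1 still
+  // holds the block (backprobe), and pd/pdm/pdt return data from the cache
+  // entry or the TBE.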
+
+ transition({Ms, M0, M1, O}, PrbInvData, I) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ i2_invL2;
+ ib_invBothClusters;
+ pp_popProbeQueue;
+ }
+
+ transition({Es, E0, E1, S, I}, PrbInvData, I) {L2TagArrayRead, L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ ib_invBothClusters;
+ ii_invIcache; // only relevant for S
+ pp_popProbeQueue;
+ }
+
+ transition(S_C, PrbInvData, I_C) {L2TagArrayWrite} {
+ t_allocateTBE;
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInvData, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms, M0, M1, O, Es, E0, E1, S, I}, PrbInv, I) {L2TagArrayRead, L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2; // nothing will happen in I
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(S_C, PrbInv, I_C) {L2TagArrayWrite} {
+ t_allocateTBE;
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms, M0, M1, O}, PrbShrData, O) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({Es, E0, E1, S}, PrbShrData, S) {L2TagArrayRead, L2TagArrayWrite} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(S_C, PrbShrData) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({I, I_C}, PrbShrData) {L2TagArrayRead} {
+ pb_sendProbeResponseBackprobe;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M0, I_E0S}, {PrbInv, PrbInvData}) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters; // must invalidate current data (only relevant for I_M0)
+ a0_allocateL1D; // but make sure there is room for incoming data when it arrives
+ pp_popProbeQueue;
+ }
+
+ transition({I_M1, I_E1S}, {PrbInv, PrbInvData}) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters; // must invalidate current data (only relevant for I_M1)
+ a1_allocateL1D; // but make sure there is room for incoming data when it arrives
+ pp_popProbeQueue;
+ }
+
+ transition({I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_ES}, {PrbInv, PrbInvData, PrbShrData}) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ a0_allocateL1D;
+ a1_allocateL1D;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M0, I_E0S, I_M1, I_E1S}, PrbShrData) {} {
+ pb_sendProbeResponseBackprobe;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbInvData, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInvData, I_C) {} {
+ pdt_sendProbeResponseDataFromTBE;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbShrData, ES_I) {} {
+ ph_sendProbeResponseHit;
+ s_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbShrData, MO_I) {} {
+ pdt_sendProbeResponseDataFromTBE;
+ s_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S0, PrbInvData, S0_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdt_sendProbeResponseDataFromTBE;
+ i2_invL2;
+ a2_allocateL2;
+ d_deallocateTBE;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S1, PrbInvData, S1_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdt_sendProbeResponseDataFromTBE;
+ i2_invL2;
+ a2_allocateL2;
+ d_deallocateTBE;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S0, PrbInv, S0_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ a2_allocateL2;
+ d_deallocateTBE;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S1, PrbInv, S1_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ a2_allocateL2;
+ d_deallocateTBE;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition({MO_S0, MO_S1}, PrbShrData) {} {
+ pdt_sendProbeResponseDataFromTBE;
+ s_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+  transition({S_F0, Es_F0, E0_F, E1_Es}, {PrbInvData, PrbInv}, IF_E0S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ // invalidate everything you've got
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ // but make sure you have room for what you need from the fill
+ a0_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F1, Es_F1, E1_F, E0_Es}, {PrbInvData, PrbInv}, IF_E1S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ // invalidate everything you've got
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ // but make sure you have room for what you need from the fill
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F, Es_F}, {PrbInvData, PrbInv}, IF_ES) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ // invalidate everything you've got
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ // but make sure you have room for what you need from the fill
+ a0_allocateL1D;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition(Si_F0, {PrbInvData, PrbInv}, F_S0) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition(Si_F1, {PrbInvData, PrbInv}, F_S1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition({Es_F0, E0_F, E1_Es}, PrbShrData, S_F0) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({Es_F1, E1_F, E0_Es}, PrbShrData, S_F1) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(Es_F, PrbShrData, S_F) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F0, S_F1, S_F, Si_F0, Si_F1}, PrbShrData) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(S_M0, PrbInvData, I_M0) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition(O_M0, PrbInvData, I_M0) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdm_sendProbeResponseDataMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M0, O_M0}, {PrbInv}, I_M0) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition(S_M1, PrbInvData, I_M1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition(O_M1, PrbInvData, I_M1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdm_sendProbeResponseDataMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M1, O_M1}, {PrbInv}, I_M1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S0, S0_C}, {PrbInvData, PrbInv}) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S1, S1_C}, {PrbInvData, PrbInv}) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M0, S_M1}, PrbShrData) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({O_M0, O_M1}, PrbShrData) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({S0, S1, S0_C, S1_C}, PrbShrData) {} {
+ pb_sendProbeResponseBackprobe;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F0, M0_F, M1_Ms, O_F0}, PrbInvData, IF_E0S) { L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F1, M1_F, M0_Ms, O_F1}, PrbInvData, IF_E1S) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ ib_invBothClusters;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F, O_F}, PrbInvData, IF_ES) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F0, M0_F, M1_Ms, O_F0}, PrbInv, IF_E0S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F1, M1_F, M0_Ms, O_F1}, PrbInv, IF_E1S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F, O_F}, PrbInv, IF_ES) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F0, M0_F, M1_Ms}, PrbShrData, O_F0) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+  transition({Ms_F1, M1_F, M0_Ms}, PrbShrData, O_F1) {L2DataArrayRead} {
+    pd_sendProbeResponseData;
+    pp_popProbeQueue;
+  }
+
+ transition({Ms_F}, PrbShrData, O_F) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({O_F0, O_F1, O_F}, PrbShrData) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ // END TRANSITIONS
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:L3Cache, "L3")
+ : CacheMemory * L3cache;
+ WireBuffer * reqToDir;
+ WireBuffer * respToDir;
+ WireBuffer * l3UnblockToDir;
+ WireBuffer * reqToL3;
+ WireBuffer * probeToL3;
+ WireBuffer * respToL3;
+ Cycles l3_request_latency := 1;
+ Cycles l3_response_latency := 35;
+
+ // To the general response network
+ MessageBuffer * responseFromL3, network="To", virtual_network="2", ordered="false", vnet_type="response";
+
+ // From the general response network
+ MessageBuffer * responseToL3, network="From", virtual_network="2", ordered="false", vnet_type="response";
+
+{
+ // EVENTS
+ enumeration(Event, desc="L3 Events") {
+ // Requests coming from the Cores
+ RdBlk, desc="CPU RdBlk event";
+ RdBlkM, desc="CPU RdBlkM event";
+ RdBlkS, desc="CPU RdBlkS event";
+ CtoD, desc="Change to Dirty request";
+ WrVicBlk, desc="L2 Victim (dirty)";
+ WrVicBlkShared, desc="L2 Victim (dirty, shared)";
+ ClVicBlk, desc="L2 Victim (clean)";
+ ClVicBlkShared, desc="L2 Victim (clean, shared)";
+
+ CPUData, desc="WB data from CPU";
+ CPUDataShared, desc="WB data from CPU, NBReqShared 1";
+ StaleWB, desc="WB stale; no data";
+
+ L3_Repl, desc="L3 Replacement";
+
+ // Probes
+ PrbInvData, desc="Invalidating probe, return dirty data";
+ PrbInv, desc="Invalidating probe, no need to return data";
+ PrbShrData, desc="Downgrading probe, return data";
+
+ // Coming from Memory Controller
+ WBAck, desc="ack from memory";
+
+ CancelWB, desc="Cancel WB from L2";
+ }
+
+ // STATES
+ // Base States:
+ state_declaration(State, desc="L3 State", default="L3Cache_State_I") {
+ M, AccessPermission:Read_Write, desc="Modified"; // No other cache has copy, memory stale
+ O, AccessPermission:Read_Only, desc="Owned"; // Correct most recent copy, others may exist in S
+ E, AccessPermission:Read_Write, desc="Exclusive"; // Correct, most recent, and only copy (and == Memory)
+ S, AccessPermission:Read_Only, desc="Shared"; // Correct, most recent. If no one in O, then == Memory
+ I, AccessPermission:Invalid, desc="Invalid";
+
+ I_M, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
+ I_O, AccessPermission:Busy, desc="Invalid, received WrVicBlkShared, sent Ack, waiting for Data";
+ I_E, AccessPermission:Busy, desc="Invalid, received ClVicBlk, sent Ack, waiting for Data";
+ I_S, AccessPermission:Busy, desc="Invalid, received ClVicBlkShared, sent Ack, waiting for Data";
+ S_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
+ S_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
+ S_E, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to E";
+ S_S, AccessPermission:Busy, desc="Shared, received ClVicBlkShared, sent Ack, waiting for Data, then go to S";
+ E_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
+ E_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
+ E_E, AccessPermission:Busy, desc="received ClVicBlk, sent Ack, waiting for Data, then go to E";
+ E_S, AccessPermission:Busy, desc="received ClVicBlkShared, sent Ack, waiting for Data, then go to S";
+ O_M, AccessPermission:Busy, desc="...";
+ O_O, AccessPermission:Busy, desc="...";
+ O_E, AccessPermission:Busy, desc="...";
+ O_S, AccessPermission:Busy, desc="...";
+ M_M, AccessPermission:Busy, desc="...";
+ M_O, AccessPermission:Busy, desc="...";
+ M_E, AccessPermission:Busy, desc="...";
+ M_S, AccessPermission:Busy, desc="...";
+ D_I, AccessPermission:Invalid, desc="drop WB data on the floor when received";
+ MOD_I, AccessPermission:Busy, desc="drop WB data on the floor, waiting for WBAck from Mem";
+ MO_I, AccessPermission:Busy, desc="M or O, received L3_Repl, waiting for WBAck from Mem";
+ I_I, AccessPermission:Busy, desc="I_M/I_O received L3_Repl";
+ I_CD, AccessPermission:Busy, desc="I_I received WBAck, now just waiting for CPUData";
+ I_C, AccessPermission:Invalid, desc="sent cancel, just waiting to receive mem wb ack so nothing gets confused";
+ }
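+
+ // Naming convention: transient states are <start>_<end>, e.g. S_M was
+ // Shared when the victim writeback arrived and becomes M once the CPU
+ // data lands. The *_I and I_* variants track replacements and canceled
+ // writebacks that are still waiting on memory.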
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+ TagArrayRead, desc="Read the tag array";
+ TagArrayWrite, desc="Write the tag array";
+ }
+
+ // STRUCTURES
+
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different from memory)?";
+ DataBlock DataBlk, desc="Data for the block";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ bool Shared, desc="Victim hit by shared probe";
+ MachineID From, desc="Waiting for writeback from...";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<L3Cache_TBE>", constructor="m_number_of_TBEs";
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // FUNCTION DEFINITIONS
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L3cache.lookup(addr));
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ return getCacheEntry(addr).DataBlk;
+ }
+
+ bool presentOrAvail(Addr addr) {
+ return L3cache.isTagPresent(addr) || L3cache.cacheAvail(addr);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes +
+ functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return L3Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return L3Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L3Cache_State_to_permission(state));
+ }
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ return true;
+ }
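+
+ // Note: recordRequestType and checkResourceAvailable are intentional
+ // no-ops in this L3 model; the {TagArrayRead, ...} annotations on the
+ // transitions below are accepted unconditionally rather than modeling
+ // banked-array contention.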
+
+
+ // OUT PORTS
+ out_port(requestNetwork_out, CPURequestMsg, reqToDir);
+ out_port(L3Resp_out, ResponseMsg, respToDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL3);
+ out_port(unblockNetwork_out, UnblockMsg, l3UnblockToDir);
+
+ // IN PORTS
+ in_port(NBResponse_in, ResponseMsg, respToL3) {
+ if (NBResponse_in.isReady(clockEdge())) {
+ peek(NBResponse_in, ResponseMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ error("Error on NBResponse Type");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseNetwork_in, ResponseMsg, responseToL3) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:CPUData) {
+ if (in_msg.NbReqShared) {
+ trigger(Event:CPUDataShared, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:CPUData, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
+ trigger(Event:StaleWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ error("Error on NBResponse Type");
+ }
+ }
+ }
+ }
+
+ // probe network
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeToL3) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ if (in_msg.ReturnData) {
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Don't think I should get any of these");
+ }
+ }
+ }
+ }
+ }
+
+ // Request Network
+ in_port(requestNetwork_in, CPURequestMsg, reqToL3) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (presentOrAvail(in_msg.addr)) {
+ if (in_msg.Shared) {
+ trigger(Event:ClVicBlkShared, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:ClVicBlk, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L3cache.cacheProbe(in_msg.addr);
+ trigger(Event:L3_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (presentOrAvail(in_msg.addr)) {
+ if (in_msg.Shared) {
+ trigger(Event:WrVicBlkShared, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L3cache.cacheProbe(in_msg.addr);
+ trigger(Event:L3_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else if (in_msg.Type == CoherenceRequestType:WrCancel) {
+ if (is_valid(tbe) && tbe.From == in_msg.Requestor) {
+ trigger(Event:CancelWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+ }
+ }
+ }
+ }
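+
+ // Victim requests (VicClean/VicDirty) that cannot allocate trigger
+ // L3_Repl on the victim address picked by cacheProbe(); the original
+ // message is not popped, so it is retried once the replacement drains.
+ // WrCancel only fires CancelWB when the TBE's From field matches the
+ // requestor; otherwise the message is recycled.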
+
+ // BEGIN ACTIONS
+
+ action(i_invL3, "i", desc="invalidate L3 cache block") {
+ if (is_valid(cache_entry)) {
+ L3cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(rm_sendResponseM, "rm", desc="send Modified response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(rs_sendResponseS, "rs", desc="send Shared response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.State := CoherenceState:Shared;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(r_requestToMem, "r", desc="Miss in L3, pass on") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Shared := false; // unneeded for this request
+ out_msg.MessageSize := in_msg.MessageSize;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ if (is_valid(cache_entry)) {
+ tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
+ tbe.Dirty := cache_entry.Dirty;
+ }
+ tbe.From := machineID;
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(vd_vicDirty, "vd", desc="Victimize dirty L3 data") {
+ enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ph_sendProbeResponseHit, "ph", desc="send probe ack, no data") {
+ enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := true;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pm_sendProbeResponseMiss, "pm", desc="send probe ack, no data") {
+ enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
+ enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ assert(tbe.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.State := CoherenceState:NA;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
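+
+ // pd_* answers a probe out of the live cache entry, while pdt_* is the
+ // variant used after the block has been victimized and only the TBE
+ // copy of the data remains (see the MO_I probe transitions below).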
+
+ action(mc_cancelMemWriteback, "mc", desc="send writeback cancel to memory") {
+ enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WrCancel;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+
+ action(a_allocateBlock, "a", desc="allocate L3 block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L3cache.allocate(address, new Entry));
+ }
+ }
+
+ action(d_writeData, "d", desc="write data to L3") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ cache_entry.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Writing to L3: %s\n", in_msg);
+ }
+ }
+
+ action(rd_copyDataFromRequest, "rd", desc="write data to L3") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := true;
+ }
+ }
+
+ action(f_setFrom, "f", desc="set who WB is expected to come from") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ tbe.From := in_msg.Requestor;
+ }
+ }
+
+ action(rf_resetFrom, "rf", desc="reset From") {
+ tbe.From := machineID;
+ }
+
+ action(wb_data, "wb", desc="write back data") {
+ enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(wt_writeDataToTBE, "wt", desc="write WB data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(uu_sendUnblock, "uu", desc="state changed, unblock") {
+ enqueue(unblockNetwork_out, UnblockMsg, l3_request_latency) {
+ out_msg.addr := address;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
+ L3cache.setMRU(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pn_popNBResponseQueue, "pn", desc="pop NB response queue") {
+ NBResponse_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(zz_recycleRequestQueue, "\z", desc="recycle request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
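+
+ // recycle() re-enqueues the head message with a fresh ready time
+ // (recycle_latency cycles out), so other pending messages can be
+ // serviced while a blocked request waits its turn again.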
+
+
+ // END ACTIONS
+
+ // BEGIN TRANSITIONS
+
+ // transitions from base
+
+ transition({I, I_C}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {TagArrayRead} {
+ r_requestToMem;
+ p_popRequestQueue;
+ }
+
+ transition(O, RdBlk) {TagArrayRead, DataArrayRead} {
+ rs_sendResponseS;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+
+ transition(M, RdBlk, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+
+ transition(S, RdBlk) {TagArrayRead, DataArrayRead} {
+ rs_sendResponseS;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+
+ transition(E, RdBlk, S) {TagArrayRead, DataArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+
+ transition({M, O}, RdBlkS, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+
+ transition({E, S}, RdBlkS, S) {TagArrayRead, DataArrayRead, TagArrayWrite} {
+ rs_sendResponseS;
+ ut_updateTag;
+ p_popRequestQueue;
+ }
+
+ transition(M, RdBlkM, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ rm_sendResponseM;
+ i_invL3;
+ p_popRequestQueue;
+ }
+
+ transition({O, S}, {RdBlkM, CtoD}) {TagArrayRead} {
+ r_requestToMem; // can't handle this, just forward
+ p_popRequestQueue;
+ }
+
+ transition(E, RdBlkM, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ rm_sendResponseM;
+ i_invL3;
+ p_popRequestQueue;
+ }
+
+ transition({I}, WrVicBlk, I_M) {TagArrayRead, TagArrayWrite} {
+ a_allocateBlock;
+ t_allocateTBE;
+ f_setFrom;
+// rd_copyDataFromRequest;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(I_C, {WrVicBlk, WrVicBlkShared, ClVicBlk, ClVicBlkShared}) {} {
+ zz_recycleRequestQueue;
+ }
+
+ transition({I}, WrVicBlkShared, I_O) {TagArrayRead, TagArrayWrite} {
+ a_allocateBlock;
+ t_allocateTBE;
+ f_setFrom;
+// rd_copyDataFromRequest;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(S, WrVicBlkShared, S_O) {TagArrayRead, TagArrayWrite} {
+// rd_copyDataFromRequest;
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(S, WrVicBlk, S_M) {TagArrayRead, TagArrayWrite} { // technically should not be possible, but assume the data comes back with the shared bit flipped
+// rd_copyDataFromRequest;
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(E, WrVicBlk, E_M) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(E, WrVicBlkShared, E_O) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(O, WrVicBlk, O_M) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(O, WrVicBlkShared, O_O) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(M, WrVicBlk, M_M) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(M, WrVicBlkShared, M_O) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition({I}, ClVicBlk, I_E) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ a_allocateBlock;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition({I}, ClVicBlkShared, I_S) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ a_allocateBlock;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(S, ClVicBlk, S_E) {TagArrayRead, TagArrayWrite} { // technically impossible, assume data comes back with shared bit flipped
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(S, ClVicBlkShared, S_S) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(E, ClVicBlk, E_E) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(E, ClVicBlkShared, E_S) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(O, ClVicBlk, O_E) {TagArrayRead, TagArrayWrite} { // technically impossible, but assume data comes back with shared bit flipped
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(O, ClVicBlkShared, O_S) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(M, ClVicBlk, M_E) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(M, ClVicBlkShared, M_S) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition({MO_I}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {} {
+ r_requestToMem;
+ p_popRequestQueue;
+ }
+
+ transition(MO_I, {WrVicBlkShared, WrVicBlk, ClVicBlk, ClVicBlkShared}, MOD_I) {TagArrayWrite} {
+ f_setFrom;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(I_M, CPUData, M) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_O, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E, CPUData, E) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E, CPUDataShared, S) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(I_S, {CPUData, CPUDataShared}, S) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ pr_popResponseQueue;
+ }
+
+ transition(S_M, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(S_O, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(S_E, CPUDataShared, S) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(S_S, {CPUData, CPUDataShared}, S) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(O_E, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition(O_S, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ d_writeData;
+ ut_updateTag; // update tag on writeback hits.
+ pr_popResponseQueue;
+ }
+
+ transition({D_I}, {CPUData, CPUDataShared}, I) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(MOD_I, {CPUData, CPUDataShared}, MO_I) {TagArrayWrite} {
+ uu_sendUnblock;
+ rf_resetFrom;
+ pr_popResponseQueue;
+ }
+
+ transition(I_I, {CPUData, CPUDataShared}, MO_I) {TagArrayWrite, DataArrayRead} {
+ uu_sendUnblock;
+ wt_writeDataToTBE;
+ rf_resetFrom;
+ pr_popResponseQueue;
+ }
+
+ transition(I_CD, {CPUData, CPUDataShared}, I) {DataArrayRead, TagArrayWrite} {
+ uu_sendUnblock;
+ wt_writeDataToTBE;
+ wb_data;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition({M, O}, L3_Repl, MO_I) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ vd_vicDirty;
+ i_invL3;
+ }
+
+ transition({E, S}, L3_Repl, I) {TagArrayRead, TagArrayWrite} {
+ i_invL3;
+ }
+
+ transition({I_M, I_O, S_M, S_O, E_M, E_O}, L3_Repl) {} {
+ zz_recycleRequestQueue;
+ }
+
+ transition({O_M, O_O, O_E, O_S, M_M, M_O, M_E, M_S}, L3_Repl) {} {
+ zz_recycleRequestQueue;
+ }
+
+ transition({I_E, I_S, S_E, S_S, E_E, E_S}, L3_Repl) {} {
+ zz_recycleRequestQueue;
+ }
+
+ transition({M, O}, PrbInvData, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
+ pd_sendProbeResponseData;
+ i_invL3;
+ pp_popProbeQueue;
+ }
+
+ transition({E, S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ i_invL3; // nothing will happen in I
+ pp_popProbeQueue;
+ }
+
+ transition({M, O, E, S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ i_invL3; // nothing will happen in I
+ pp_popProbeQueue;
+ }
+
+ transition({M, O}, PrbShrData, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({E, S}, PrbShrData, S) {TagArrayRead, TagArrayWrite} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(I, PrbShrData) {TagArrayRead} {
+ pm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInvData, I_C) {TagArrayWrite, DataArrayRead} {
+ pdt_sendProbeResponseDataFromTBE;
+ mc_cancelMemWriteback;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInv, I_C) {TagArrayWrite} {
+ pi_sendProbeResponseInv;
+ mc_cancelMemWriteback;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbShrData) {DataArrayRead} {
+ pdt_sendProbeResponseDataFromTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, {PrbInvData, PrbInv}) {} {
+ pi_sendProbeResponseInv;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbShrData) {} {
+ pm_sendProbeResponseMiss;
+ pp_popProbeQueue;
+ }
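+
+ // Probe handling summary: M and O reply to PrbShrData with data since
+ // they hold the only up-to-date copy; E and S answer hit-without-data
+ // since those states never hold dirty data; MO_I serves probes out of
+ // its TBE while the victim writeback is still in flight.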
+
+ transition(I_I, {WBAck}, I_CD) {TagArrayWrite} {
+ pn_popNBResponseQueue;
+ }
+
+ transition(MOD_I, WBAck, D_I) {DataArrayRead} {
+ wb_data;
+ pn_popNBResponseQueue;
+ }
+
+ transition(MO_I, WBAck, I) {DataArrayRead, TagArrayWrite} {
+ wb_data;
+ dt_deallocateTBE;
+ pn_popNBResponseQueue;
+ }
+
+ transition(I_C, {WBAck}, I) {TagArrayWrite} {
+ dt_deallocateTBE;
+ pn_popNBResponseQueue;
+ }
+
+ transition({I_M, I_O, I_E, I_S}, CancelWB, I) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ i_invL3;
+ p_popRequestQueue;
+ }
+
+ transition({S_S, S_O, S_M, S_E}, CancelWB, S) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition({E_M, E_O, E_E, E_S}, CancelWB, E) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition({O_M, O_O, O_E, O_S}, CancelWB, O) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition({M_M, M_O, M_E, M_S}, CancelWB, M) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(D_I, CancelWB, I) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(MOD_I, CancelWB, MO_I) {TagArrayWrite} {
+ uu_sendUnblock;
+ rf_resetFrom;
+ p_popRequestQueue;
+ }
+
+ transition(I_I, CancelWB, I_C) {TagArrayWrite} {
+ uu_sendUnblock;
+ rf_resetFrom;
+ mc_cancelMemWriteback;
+ p_popRequestQueue;
+ }
+
+ transition(I_CD, CancelWB, I) {TagArrayWrite} {
+ uu_sendUnblock;
+ dt_deallocateTBE;
+ mc_cancelMemWriteback;
+ p_popRequestQueue;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:CorePair, "CP-like Core Coherence")
+ : Sequencer * sequencer;
+ Sequencer * sequencer1;
+ CacheMemory * L1Icache;
+ CacheMemory * L1D0cache;
+ CacheMemory * L1D1cache;
+ CacheMemory * L2cache;
+ int regionBufferNum;
+ bool send_evictions := "False";
+ Cycles issue_latency := 5;
+ Cycles l2_hit_latency := 18;
+
+ // BEGIN Core Buffers
+
+ // To the Network
+ MessageBuffer * requestFromCore, network="To", virtual_network="0", ordered="true", vnet_type="request";
+ MessageBuffer * responseFromCore, network="To", virtual_network="2", ordered="false", vnet_type="response";
+ MessageBuffer * unblockFromCore, network="To", virtual_network="4", ordered="false", vnet_type="unblock";
+
+ // From the Network
+ MessageBuffer * probeToCore, network="From", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseToCore, network="From", virtual_network="2", ordered="false", vnet_type="response";
+
+ MessageBuffer * mandatoryQueue, ordered="false";
+ MessageBuffer * triggerQueue, ordered="true";
+
+ // END Core Buffers
+
+{
+ // BEGIN STATES
+ state_declaration(State, desc="Cache states", default="CorePair_State_I") {
+
+ I, AccessPermission:Invalid, desc="Invalid";
+ S, AccessPermission:Read_Only, desc="Shared";
+ E0, AccessPermission:Read_Write, desc="Exclusive with Cluster 0 ownership";
+ E1, AccessPermission:Read_Write, desc="Exclusive with Cluster 1 ownership";
+ Es, AccessPermission:Read_Write, desc="Exclusive in core";
+ O, AccessPermission:Read_Only, desc="Owner state in core, both clusters and other cores may be sharing line";
+ Ms, AccessPermission:Read_Write, desc="Modified in core, both clusters may be sharing line";
+ M0, AccessPermission:Read_Write, desc="Modified with cluster 0 ownership";
+ M1, AccessPermission:Read_Write, desc="Modified with cluster 1 ownership";
+
+ // Transient States
+ I_M0, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
+ I_M1, AccessPermission:Busy, desc="Invalid, issued RdBlkM, have not seen response yet";
+ I_M0M1, AccessPermission:Busy, desc="Was in I_M0, got a store request from other cluster as well";
+ I_M1M0, AccessPermission:Busy, desc="Was in I_M1, got a store request from other cluster as well";
+ I_M0Ms, AccessPermission:Busy, desc="Was in I_M0, got a load request from other cluster as well";
+ I_M1Ms, AccessPermission:Busy, desc="Was in I_M1, got a load request from other cluster as well";
+ I_E0S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
+ I_E1S, AccessPermission:Busy, desc="Invalid, issued RdBlk, have not seen response yet";
+ I_ES, AccessPermission:Busy, desc="S_F got hit by invalidating probe, RdBlk response needs to go to both clusters";
+
+ IF_E0S, AccessPermission:Busy, desc="something got hit with Probe Invalidate, now just I_E0S but expecting a L2_to_L1D0 trigger, just drop when receive";
+ IF_E1S, AccessPermission:Busy, desc="something got hit with Probe Invalidate, now just I_E1S but expecting a L2_to_L1D1 trigger, just drop when receive";
+ IF_ES, AccessPermission:Busy, desc="same, but waiting for two fills";
+ IF0_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
+ IF1_ES, AccessPermission:Busy, desc="same, but waiting for two fills, got one";
+ F_S0, AccessPermission:Busy, desc="same, but going to S0 when trigger received";
+ F_S1, AccessPermission:Busy, desc="same, but going to S1 when trigger received";
+
+ ES_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for clean writeback ack";
+ MO_I, AccessPermission:Read_Only, desc="L2 replacement, waiting for dirty writeback ack";
+ MO_S0, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
+ MO_S1, AccessPermission:Read_Only, desc="M/O got Ifetch Miss, must write back first, then send RdBlkS";
+ S_F0, AccessPermission:Read_Only, desc="Shared, filling L1";
+ S_F1, AccessPermission:Read_Only, desc="Shared, filling L1";
+ S_F, AccessPermission:Read_Only, desc="Shared, filling L1";
+ O_F0, AccessPermission:Read_Only, desc="Owned, filling L1";
+ O_F1, AccessPermission:Read_Only, desc="Owned, filling L1";
+ O_F, AccessPermission:Read_Only, desc="Owned, filling L1";
+ Si_F0, AccessPermission:Read_Only, desc="Shared, filling icache";
+ Si_F1, AccessPermission:Read_Only, desc="Shared, filling icache";
+ S_M0, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
+ S_M1, AccessPermission:Read_Only, desc="Shared, issued CtoD, have not seen response yet";
+ O_M0, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
+ O_M1, AccessPermission:Read_Only, desc="Owned, issued CtoD, have not seen response yet";
+ S0, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 0, waiting for response";
+ S1, AccessPermission:Busy, desc="RdBlkS on behalf of cluster 1, waiting for response";
+
+ Es_F0, AccessPermission:Read_Write, desc="Es, Cluster read, filling";
+ Es_F1, AccessPermission:Read_Write, desc="Es, Cluster read, filling";
+ Es_F, AccessPermission:Read_Write, desc="Es, other cluster read, filling";
+ E0_F, AccessPermission:Read_Write, desc="E0, cluster read, filling";
+ E1_F, AccessPermission:Read_Write, desc="...";
+ E0_Es, AccessPermission:Read_Write, desc="...";
+ E1_Es, AccessPermission:Read_Write, desc="...";
+ Ms_F0, AccessPermission:Read_Write, desc="...";
+ Ms_F1, AccessPermission:Read_Write, desc="...";
+ Ms_F, AccessPermission:Read_Write, desc="...";
+ M0_F, AccessPermission:Read_Write, desc="...";
+ M0_Ms, AccessPermission:Read_Write, desc="...";
+ M1_F, AccessPermission:Read_Write, desc="...";
+ M1_Ms, AccessPermission:Read_Write, desc="...";
+
+ I_C, AccessPermission:Invalid, desc="Invalid, but waiting for WBAck from NB for canceled writeback";
+ S0_C, AccessPermission:Busy, desc="MO_S0 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
+ S1_C, AccessPermission:Busy, desc="MO_S1 hit by invalidating probe, waiting for WBAck from NB for canceled WB";
+ S_C, AccessPermission:Busy, desc="S*_C got NB_AckS, still waiting for WBAck";
+
+ } // END STATES
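+
+ // Naming convention: a 0/1 suffix ties a state to one cluster (M0, E1,
+ // S_F0, ...), while *_F* states are waiting on an L1 fill that completes
+ // via an L2_to_L1* trigger from the trigger queue.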
+
+ // BEGIN EVENTS
+ enumeration(Event, desc="CP Events") {
+ // CP Initiated events
+ C0_Load_L1miss, desc="Cluster 0 load, L1 missed";
+ C0_Load_L1hit, desc="Cluster 0 load, L1 hit";
+ C1_Load_L1miss, desc="Cluster 1 load, L1 missed";
+ C1_Load_L1hit, desc="Cluster 1 load, L1 hit";
+ Ifetch0_L1hit, desc="Instruction fetch, hit in the L1";
+ Ifetch1_L1hit, desc="Instruction fetch, hit in the L1";
+ Ifetch0_L1miss, desc="Instruction fetch, missed in the L1";
+ Ifetch1_L1miss, desc="Instruction fetch, missed in the L1";
+ C0_Store_L1miss, desc="Cluster 0 store missed in L1";
+ C0_Store_L1hit, desc="Cluster 0 store hit in L1";
+ C1_Store_L1miss, desc="Cluster 1 store missed in L1";
+ C1_Store_L1hit, desc="Cluster 1 store hit in L1";
+ // NB Initiated events
+ NB_AckS, desc="NB Ack to Core Request";
+ NB_AckM, desc="NB Ack to Core Request";
+ NB_AckE, desc="NB Ack to Core Request";
+
+ NB_AckWB, desc="NB Ack for writeback";
+
+ // Memory System initiated events
+ L1I_Repl, desc="Replace address from L1I"; // Presumed clean
+ L1D0_Repl, desc="Replace address from L1D0"; // Presumed clean
+ L1D1_Repl, desc="Replace address from L1D1"; // Presumed clean
+ L2_Repl, desc="Replace address from L2";
+
+ L2_to_L1D0, desc="L1 fill from L2";
+ L2_to_L1D1, desc="L1 fill from L2";
+ L2_to_L1I, desc="L1 fill from L2";
+
+ // Probe Events
+ PrbInvData, desc="probe, return O or M data";
+ PrbInvDataDemand, desc="probe, return O or M data. Demand request";
+ PrbInv, desc="probe, no need for data";
+ PrbShrData, desc="probe downgrade, return O or M data";
+ PrbShrDataDemand, desc="probe downgrade, return O or M data. Demand request";
+ ForceRepl, desc="probe from r-buf. Act as though a repl";
+ ForceDowngrade, desc="probe from r-buf. Act as though a downgrade";
+
+ } // END EVENTS
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ L1D0DataArrayRead, desc="Read the data array";
+ L1D0DataArrayWrite, desc="Write the data array";
+ L1D0TagArrayRead, desc="Read the tag array";
+ L1D0TagArrayWrite, desc="Write the tag array";
+ L1D1DataArrayRead, desc="Read the data array";
+ L1D1DataArrayWrite, desc="Write the data array";
+ L1D1TagArrayRead, desc="Read the tag array";
+ L1D1TagArrayWrite, desc="Write the tag array";
+ L1IDataArrayRead, desc="Read the data array";
+ L1IDataArrayWrite, desc="Write the data array";
+ L1ITagArrayRead, desc="Read the tag array";
+ L1ITagArrayWrite, desc="Write the tag array";
+ L2DataArrayRead, desc="Read the data array";
+ L2DataArrayWrite, desc="Write the data array";
+ L2TagArrayRead, desc="Read the tag array";
+ L2TagArrayWrite, desc="Write the tag array";
+ }
+
+
+ // BEGIN STRUCTURE DEFINITIONS
+
+
+ // Cache Entry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Shared, desc="Victim hit by shared probe";
+ bool AckNeeded, desc="True if need to ack r-dir";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<CorePair_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ // END STRUCTURE DEFINITIONS
+
+ // BEGIN INTERNAL FUNCTIONS
+
+ MachineID getPeer(MachineID mach) {
+ return createMachineID(MachineType:RegionBuffer, intToID(regionBufferNum));
+ }
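+
+ // getPeer ignores its argument: every core-side request is steered to
+ // the RegionBuffer selected by regionBufferNum rather than straight to
+ // a directory.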
+
+ bool addressInCore(Addr addr) {
+ return (L2cache.isTagPresent(addr) || L1Icache.isTagPresent(addr) || L1D0cache.isTagPresent(addr) || L1D1cache.isTagPresent(addr));
+ }
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
+ return L2cache_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return tbe.DataBlk;
+ } else {
+ return getCacheEntry(addr).DataBlk;
+ }
+ }
+
+ Entry getL1CacheEntry(Addr addr, int cluster), return_by_pointer="yes" {
+ if (cluster == 0) {
+ Entry L1D0_entry := static_cast(Entry, "pointer", L1D0cache.lookup(addr));
+ return L1D0_entry;
+ } else {
+ Entry L1D1_entry := static_cast(Entry, "pointer", L1D1cache.lookup(addr));
+ return L1D1_entry;
+ }
+ }
+
+ Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry c_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ return c_entry;
+ }
+
+ bool presentOrAvail2(Addr addr) {
+ return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
+ }
+
+ bool presentOrAvailI(Addr addr) {
+ return L1Icache.isTagPresent(addr) || L1Icache.cacheAvail(addr);
+ }
+
+ bool presentOrAvailD0(Addr addr) {
+ return L1D0cache.isTagPresent(addr) || L1D0cache.cacheAvail(addr);
+ }
+
+ bool presentOrAvailD1(Addr addr) {
+ return L1D1cache.isTagPresent(addr) || L1D1cache.cacheAvail(addr);
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ return CorePair_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return CorePair_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ bool isValid(Addr addr) {
+ AccessPermission perm := getAccessPermission(addr);
+ if (perm == AccessPermission:NotPresent ||
+ perm == AccessPermission:Invalid ||
+ perm == AccessPermission:Busy) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(CorePair_State_to_permission(state));
+ }
+ }
+
+ MachineType testAndClearLocalHit(Entry cache_entry) {
+ assert(is_valid(cache_entry));
+ if (cache_entry.FromL2) {
+ cache_entry.FromL2 := false;
+ return MachineType:L2Cache;
+ } else {
+ return MachineType:L1Cache;
+ }
+ }
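+
+ // FromL2 flags a line freshly filled from the L2: the first local hit
+ // reports MachineType:L2Cache and clears the flag, so subsequent hits
+ // report MachineType:L1Cache.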
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L1D0DataArrayRead) {
+ L1D0cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L1D0DataArrayWrite) {
+ L1D0cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L1D0TagArrayRead) {
+ L1D0cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L1D0TagArrayWrite) {
+ L1D0cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:L1D1DataArrayRead) {
+ L1D1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L1D1DataArrayWrite) {
+ L1D1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L1D1TagArrayRead) {
+ L1D1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L1D1TagArrayWrite) {
+ L1D1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:L1IDataArrayRead) {
+ L1Icache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L1IDataArrayWrite) {
+ L1Icache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L1ITagArrayRead) {
+ L1Icache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L1ITagArrayWrite) {
+ L1Icache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:L2DataArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L2DataArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L2TagArrayRead) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L2TagArrayWrite) {
+ L2cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L2DataArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L2DataArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L2TagArrayRead) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L2TagArrayWrite) {
+ return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D0DataArrayRead) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D0DataArrayWrite) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D0TagArrayRead) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D0TagArrayWrite) {
+ return L1D0cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D1DataArrayRead) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D1DataArrayWrite) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1D1TagArrayRead) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1D1TagArrayWrite) {
+ return L1D1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1IDataArrayRead) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1IDataArrayWrite) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L1ITagArrayRead) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L1ITagArrayWrite) {
+ return L1Icache.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ return true;
+ }
+ }
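+
+ // Unlike the L3 stubs earlier in this patch, the core pair forwards
+ // these hooks to the individual caches, so the per-array annotations on
+ // transitions (L1D0TagArrayRead, L2DataArrayWrite, ...) are checked
+ // against and recorded by the actual cache arrays.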
+
+ // END INTERNAL FUNCTIONS
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, CPURequestMsg, requestFromCore);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCore);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+ out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
+
+ // ** IN_PORTS **
+
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, block_on="addr") {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == TriggerType:L2_to_L1) {
+ if (in_msg.Dest == CacheId:L1I) {
+ trigger(Event:L2_to_L1I, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Dest == CacheId:L1D0) {
+ trigger(Event:L2_to_L1D0, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Dest == CacheId:L1D1) {
+ trigger(Event:L2_to_L1D1, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("unexpected trigger dest");
+ }
+ }
+ }
+ }
+ }
+
+
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeToCore) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg, block_on="addr") {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ if (in_msg.DemandRequest) {
+ trigger(Event:PrbInvDataDemand, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.ReturnData) {
+ trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ if (in_msg.DemandRequest) {
+ trigger(Event:PrbShrDataDemand, in_msg.addr, cache_entry, tbe);
+ } else {
+ assert(in_msg.ReturnData);
+ trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == ProbeRequestType:PrbRepl) {
+ trigger(Event:ForceRepl, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == ProbeRequestType:PrbRegDowngrade) {
+ trigger(Event:ForceDowngrade, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unknown probe request");
+ }
+ }
+ }
+ }
+
+
+ // ResponseNetwork
+ in_port(responseToCore_in, ResponseMsg, responseToCore) {
+ if (responseToCore_in.isReady(clockEdge())) {
+ peek(responseToCore_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+
+ if (in_msg.Type == CoherenceResponseType:NBSysResp) {
+ if (in_msg.State == CoherenceState:Modified) {
+ trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.State == CoherenceState:Shared) {
+ trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.State == CoherenceState:Exclusive) {
+ trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
+ trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected Response Message to Core");
+ }
+ }
+ }
+ }
+
+ // Nothing from the Unblock Network
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+ Entry cache_entry := getCacheEntry(in_msg.LineAddress);
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // FETCH ACCESS
+
+ if (L1Icache.isTagPresent(in_msg.LineAddress)) {
+ if (mod(in_msg.contextId, 2) == 0) {
+ trigger(Event:Ifetch0_L1hit, in_msg.LineAddress, cache_entry, tbe);
+ } else {
+ trigger(Event:Ifetch1_L1hit, in_msg.LineAddress, cache_entry, tbe);
+ }
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ if (presentOrAvailI(in_msg.LineAddress)) {
+ if (mod(in_msg.contextId, 2) == 0) {
+ trigger(Event:Ifetch0_L1miss, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ trigger(Event:Ifetch1_L1miss, in_msg.LineAddress, cache_entry,
+ tbe);
+ }
+ } else {
+ Addr victim := L1Icache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L1I_Repl, victim,
+ getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else { // Not present or avail in L2
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L2_Repl(0) is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ } else {
+ // DATA ACCESS
+ if (mod(in_msg.contextId, 2) == 1) {
+ if (L1D1cache.isTagPresent(in_msg.LineAddress)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C1_Load_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ // Stores must write through, make sure L2 avail.
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ trigger(Event:C1_Store_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L2_Repl(1) is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ if (presentOrAvailD1(in_msg.LineAddress)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C1_Load_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:C1_Store_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L1D1cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L1D1_Repl is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L1D1_Repl, victim,
+ getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ } else { // not present or avail in L2
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L2_Repl(2) is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
+ }
+ }
+ } else {
+ Entry L1D0cache_entry := getL1CacheEntry(in_msg.LineAddress, 0);
+ if (is_valid(L1D0cache_entry)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C0_Load_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ trigger(Event:C0_Store_L1hit, in_msg.LineAddress, cache_entry,
+ tbe);
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L2_Repl(3) is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ } else {
+ if (presentOrAvail2(in_msg.LineAddress)) {
+ if (presentOrAvailD0(in_msg.LineAddress)) {
+ if (in_msg.Type == RubyRequestType:LD) {
+ trigger(Event:C0_Load_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:C0_Store_L1miss, in_msg.LineAddress,
+ cache_entry, tbe);
+ }
+ } else {
+ Addr victim := L1D0cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L1D0_Repl is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L1D0_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ } else {
+ Addr victim := L2cache.cacheProbe(in_msg.LineAddress);
+ DPRINTF(RubySlicc, "Victim for %s L2_Repl(4) is %s\n", in_msg.LineAddress, victim);
+ trigger(Event:L2_Repl, victim, getCacheEntry(victim),
+ TBEs.lookup(victim));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
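+
+  // Core-to-cluster routing above is by contextId parity: even contexts
+  // use cluster 0 (L1D0), odd contexts cluster 1. Because the L1s write
+  // through, a store may only proceed when an L2 entry is present or
+  // allocatable; otherwise a victim is chosen first and an L1*_Repl or
+  // L2_Repl event is triggered instead.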
+
+
+ // ACTIONS
+ action(ii_invIcache, "ii", desc="invalidate iCache") {
+ if (L1Icache.isTagPresent(address)) {
+ L1Icache.deallocate(address);
+ }
+ }
+
+ action(i0_invCluster, "i0", desc="invalidate cluster 0") {
+ if (L1D0cache.isTagPresent(address)) {
+ L1D0cache.deallocate(address);
+ }
+ }
+
+ action(i1_invCluster, "i1", desc="invalidate cluster 1") {
+ if (L1D1cache.isTagPresent(address)) {
+ L1D1cache.deallocate(address);
+ }
+ }
+
+ action(ib_invBothClusters, "ib", desc="invalidate both clusters") {
+ if (L1D0cache.isTagPresent(address)) {
+ L1D0cache.deallocate(address);
+ }
+ if (L1D1cache.isTagPresent(address)) {
+ L1D1cache.deallocate(address);
+ }
+ }
+
+ action(i2_invL2, "i2", desc="invalidate L2") {
+ if(is_valid(cache_entry)) {
+ L2cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(n_issueRdBlk, "n", desc="Issue RdBlk") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlk;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkM;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(nMs_issueRdBlkMSinked, "nMs", desc="Issue RdBlkM with CtoDSinked") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkM;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.CtoDSinked := true;
+ }
+ }
+
+ action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ action(nSs_issueRdBlkSSinked, "nSs", desc="Issue RdBlkS with CtoDSinked") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:RdBlkS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.CtoDSinked := true;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
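+
+  // The issue actions above differ only in the request type: RdBlk for
+  // plain loads, RdBlkM for stores (read with intent to modify), RdBlkS
+  // for ifetches (shared). The *Sinked variants set CtoDSinked, which
+  // appears to tell the directory that an earlier change-to-dirty request
+  // was sunk by a racing probe and must be treated as a full fetch; note
+  // they do not set InitialRequestTime.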
+
+ action(vd_victim, "vd", desc="Victimize M/O L2 Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ assert(is_valid(cache_entry));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:O) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ action(vc_victim, "vc", desc="Victimize E/S L2 Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:S) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ }
+ }
+
+ // Could send these two directly to dir if we made a new out network on channel 0
+ action(vdf_victimForce, "vdf", desc="Victimize M/O L2 Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ assert(is_valid(cache_entry));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicDirty;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:O) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ out_msg.Private := true;
+ }
+ }
+
+ action(vcf_victimForce, "vcf", desc="Victimize E/S L2 Data") {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Type := CoherenceRequestType:VicClean;
+ out_msg.InitialRequestTime := curCycle();
+ if (cache_entry.CacheState == State:S) {
+ out_msg.Shared := true;
+ } else {
+ out_msg.Shared := false;
+ }
+ out_msg.Private := true;
+ }
+ }
+
+ action(a0_allocateL1D, "a0", desc="Allocate L1D0 Block") {
+ if (L1D0cache.isTagPresent(address) == false) {
+ L1D0cache.allocateVoid(address, new Entry);
+ }
+ }
+
+ action(a1_allocateL1D, "a1", desc="Allocate L1D1 Block") {
+ if (L1D1cache.isTagPresent(address) == false) {
+ L1D1cache.allocateVoid(address, new Entry);
+ }
+ }
+
+ action(ai_allocateL1I, "ai", desc="Allocate L1I Block") {
+ if (L1Icache.isTagPresent(address) == false) {
+ L1Icache.allocateVoid(address, new Entry);
+ }
+ }
+
+ action(a2_allocateL2, "a2", desc="Allocate L2 Block") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.Shared := false;
+ }
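+
+  // The TBE snapshots the L2 block's DataBlk/Dirty at allocation time so
+  // that probes arriving while a writeback is in flight can be answered
+  // from the TBE (see pdt_sendProbeResponseDataFromTBE below).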
+
+ action(d_deallocateTBE, "d", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
+ responseToCore_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="Pop Trigger Queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="pop probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(il0_loadDone, "il0", desc="Cluster 0 i load done") {
+ Entry entry := getICacheEntry(address);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+ action(il1_loadDone, "il1", desc="Cluster 1 i load done") {
+ Entry entry := getICacheEntry(address);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+ action(l0_loadDone, "l0", desc="Cluster 0 load done") {
+ Entry entry := getL1CacheEntry(address, 0);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+ action(l1_loadDone, "l1", desc="Cluster 1 load done") {
+ Entry entry := getL1CacheEntry(address, 1);
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ assert(is_valid(entry));
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ }
+
+ action(xl0_loadDone, "xl0", desc="Cluster 0 load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ DPRINTF(ProtocolTrace, "CP Load Done 0 -- address %s, data: %s\n",
+ address, l2entry.DataBlk);
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ assert(is_valid(l2entry));
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(xl1_loadDone, "xl1", desc="Cluster 1 load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ assert(is_valid(l2entry));
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(xi0_loadDone, "xi0", desc="Cluster 0 i-load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ assert(is_valid(l2entry));
+ sequencer.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
+
+ action(xi1_loadDone, "xi1", desc="Cluster 1 i-load done") {
+ peek(responseToCore_in, ResponseMsg) {
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ Entry l2entry := getCacheEntry(address); // Used for functional accesses
+ // L2 supplies data (functional accesses only look in L2, ok because L1
+ // writes through to L2)
+ assert(is_valid(l2entry));
+ sequencer1.readCallback(address,
+ l2entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ }
+ }
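+
+  // Load completion comes in two flavors: l*/il* complete locally out of
+  // the L2 copy (L1/L2 hit path, hit status reported via
+  // testAndClearLocalHit), while the x* variants complete a fill from the
+  // Directory/L3 and forward its timing fields (InitialRequestTime etc.)
+  // to the sequencer for miss-latency accounting.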
+
+ action(s0_storeDone, "s0", desc="Cluster 0 store done") {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ sequencer.writeCallback(address,
+ cache_entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ cache_entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+
+ action(s1_storeDone, "s1", desc="Cluster 1 store done") {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ sequencer1.writeCallback(address,
+ cache_entry.DataBlk,
+ true,
+ testAndClearLocalHit(entry));
+ cache_entry.Dirty := true;
+ entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+
+ action(xs0_storeDone, "xs0", desc="Cluster 0 store done") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ sequencer.writeCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ cache_entry.Dirty := true;
+ entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+ }
+
+ action(xs1_storeDone, "xs1", desc="Cluster 1 store done") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ assert((machineIDToMachineType(in_msg.Sender) == MachineType:Directory) ||
+ (machineIDToMachineType(in_msg.Sender) == MachineType:L3Cache));
+ sequencer1.writeCallback(address,
+ cache_entry.DataBlk,
+ false,
+ machineIDToMachineType(in_msg.Sender),
+ in_msg.InitialRequestTime,
+ in_msg.ForwardRequestTime,
+ in_msg.ProbeRequestStartTime);
+ cache_entry.Dirty := true;
+ entry.Dirty := true;
+ entry.DataBlk := cache_entry.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+ }
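+
+  // Stores are write-through: the L2 entry is updated and marked dirty
+  // first (functional accesses only look in the L2), then the L1 copy is
+  // refreshed from it. The s*/xs* split mirrors the local vs. remote
+  // distinction of the load-done actions above.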
+
+ action(forward_eviction_to_cpu0, "fec0", desc="sends eviction information to processor0") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(forward_eviction_to_cpu1, "fec1", desc="sends eviction information to processor1") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
+ sequencer1.evictionCallback(address);
+ }
+ }
+
+ action(ci_copyL2ToL1, "ci", desc="copy L2 data to L1") {
+ Entry entry := getICacheEntry(address);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.Dirty := cache_entry.Dirty;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.FromL2 := true;
+ }
+
+ action(c0_copyL2ToL1, "c0", desc="copy L2 data to L1") {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.Dirty := cache_entry.Dirty;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.FromL2 := true;
+ }
+
+ action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
+ peek(responseToCore_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:StaleNotif;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(c1_copyL2ToL1, "c1", desc="copy L2 data to L1") {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.Dirty := cache_entry.Dirty;
+ entry.DataBlk := cache_entry.DataBlk;
+ entry.FromL2 := true;
+ }
+
+ action(fi_L2ToL1, "fi", desc="L2 to L1 inst fill") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ out_msg.Dest := CacheId:L1I;
+ }
+ }
+
+ action(f0_L2ToL1, "f0", desc="L2 to L1 data fill") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ out_msg.Dest := CacheId:L1D0;
+ }
+ }
+
+ action(f1_L2ToL1, "f1", desc="L2 to L1 data fill") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ out_msg.Dest := CacheId:L1D1;
+ }
+ }
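+
+  // L2-to-L1 fills are not instantaneous: the f* actions enqueue a
+  // trigger that fires after l2_hit_latency, and the resulting L2_to_L1*
+  // event copies the block into the chosen L1 (c*_copyL2ToL1) and
+  // completes the access.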
+
+ action(wi_writeIcache, "wi", desc="write data to icache (and l2)") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getICacheEntry(address);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.DataBlk := in_msg.DataBlk;
+ entry.Dirty := in_msg.Dirty;
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(w0_writeDcache, "w0", desc="write data to dcache 0 (and l2)") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 0);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.DataBlk := in_msg.DataBlk;
+ entry.Dirty := in_msg.Dirty;
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(w1_writeDcache, "w1", desc="write data to dcache 1 (and l2)") {
+ peek(responseToCore_in, ResponseMsg) {
+ Entry entry := getL1CacheEntry(address, 1);
+ assert(is_valid(entry));
+ assert(is_valid(cache_entry));
+ entry.DataBlk := in_msg.DataBlk;
+ entry.Dirty := in_msg.Dirty;
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(wb_data, "wb", desc="write back data") {
+ peek(responseToCore_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUData;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Shared) {
+ out_msg.NbReqShared := true;
+ } else {
+ out_msg.NbReqShared := false;
+ }
+ out_msg.State := CoherenceState:Shared; // faux info
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Hit := false;
+ out_msg.Ntsl := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false;
+ out_msg.Ntsl := true;
+ out_msg.Hit := false;
+ APPEND_TRANSITION_COMMENT("Setting Ms");
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(ph_sendProbeResponseHit, "ph", desc="send probe ack PrbShrData, no data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ assert(addressInCore(address) || is_valid(tbe));
+      out_msg.Dirty := false; // Dirty is only set when sending data back
+ out_msg.Hit := true;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pb_sendProbeResponseBackprobe, "pb", desc="send probe ack PrbShrData, no data, check for L1 residence") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ if (addressInCore(address)) {
+ out_msg.Hit := true;
+ } else {
+ out_msg.Hit := false;
+ }
+ out_msg.Dirty := false; // not sending back data, so def. not dirty
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ assert(cache_entry.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ APPEND_TRANSITION_COMMENT("Setting Ms");
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.isValid := isValid(address);
+ }
+ }
+
+ action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ assert(tbe.Dirty);
+ out_msg.Dirty := true;
+ out_msg.Hit := true;
+ out_msg.State := CoherenceState:NA;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.isValid := isValid(address);
+ }
+ }
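+
+  // Probe responses come in several flavors: pi/pim ack an invalidation
+  // with no data, ph acks a sharing probe that hit, pb additionally
+  // reports whether any L1 still holds the line (backprobe), and
+  // pd/pdm/pdt return the dirty block from the cache entry or, while a
+  // writeback is pending, from the TBE.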
+
+ action(ra_sendReplAck, "ra", desc="Send ack to r-buf that line is replaced if needed") {
+ if (is_invalid(tbe) || tbe.AckNeeded) {
+ enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:InvAck;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ APPEND_TRANSITION_COMMENT(" Sending ack to r-buf ");
+ } else {
+ APPEND_TRANSITION_COMMENT(" NOT Sending ack to r-buf ");
+ }
+ }
+
+ action(m_markAckNeeded, "m", desc="Mark TBE to send ack when deallocated") {
+ assert(is_valid(tbe));
+ tbe.AckNeeded := true;
+ }
+
+ action(mc_cancelWB, "mc", desc="send writeback cancel to L3") {
+ enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:CPUCancelWB;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(s_setSharedFlip, "s", desc="hit by shared probe, status may be different") {
+ assert(is_valid(tbe));
+ tbe.Shared := true;
+ }
+
+ action(uu_sendUnblock, "uu", desc="state changed, unblock") {
+ enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ out_msg.wasValid := isValid(address);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(sdv_sendDoneValid, "sdv", desc="Request finished, send done ack") {
+ enqueue(unblockNetwork_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.DoneAck := true;
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else if (is_valid(cache_entry)) {
+ out_msg.Dirty := cache_entry.Dirty;
+ } else {
+ out_msg.Dirty := false;
+ }
+ out_msg.validToInvalid := false;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(sdi_sendDoneInvalid, "sdi", desc="Request finished, send done ack") {
+ enqueue(unblockNetwork_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Destination.add(getPeer(machineID));
+ out_msg.DoneAck := true;
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ if (is_valid(tbe)) {
+ out_msg.Dirty := tbe.Dirty;
+ } else if (is_valid(cache_entry)) {
+ out_msg.Dirty := cache_entry.Dirty;
+ } else {
+ out_msg.Dirty := false;
+ }
+ out_msg.validToInvalid := true;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(l10m_profileMiss, "l10m", desc="l10m miss profile") {
+ ++L1D0cache.demand_misses;
+ }
+
+ action(l11m_profileMiss, "l11m", desc="l11m miss profile") {
+ ++L1D1cache.demand_misses;
+ }
+
+  action(l1im_profileMiss, "l1im", desc="l1im miss profile") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(l2m_profileMiss, "l2m", desc="l2m miss profile") {
+ ++L2cache.demand_misses;
+ }
+
+ action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
+ probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+ // END ACTIONS
+
+ // BEGIN TRANSITIONS
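+  // Transitions read as transition(initial, event, final) {resources}
+  // {actions}: the first block names the tag/data array accesses the
+  // transition performs (used by Ruby for array resource and latency
+  // modeling), and the second block lists actions executed in order.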
+
+ // transitions from base
+ transition(I, C0_Load_L1miss, I_E0S) {L1D0TagArrayRead, L2TagArrayRead} {
+ // track misses, if implemented
+ // since in I state, L2 miss as well
+    l2m_profileMiss;
+    l10m_profileMiss;
+    a0_allocateL1D;
+    a2_allocateL2;
+ i1_invCluster;
+ ii_invIcache;
+ n_issueRdBlk;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, C1_Load_L1miss, I_E1S) {L1D1TagArrayRead, L2TagArrayRead} {
+ // track misses, if implemented
+ // since in I state, L2 miss as well
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ a2_allocateL2;
+ i0_invCluster;
+ ii_invIcache;
+ n_issueRdBlk;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch0_L1miss, S0) {L1ITagArrayRead, L2TagArrayRead} {
+ // track misses, if implemented
+ // L2 miss as well
+ l10m_profileMiss;
+ l2m_profileMiss;
+ l1im_profileMiss;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
+ l11m_profileMiss;
+ // track misses, if implemented
+ // L2 miss as well
+ l2m_profileMiss;
+ l1im_profileMiss;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(I, C0_Store_L1miss, I_M0) {L1D0TagArrayRead,L2TagArrayRead} {
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ a2_allocateL2;
+ i1_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+  transition(I, C1_Store_L1miss, I_M1) {L1D1TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ a2_allocateL2;
+ i0_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, C0_Load_L1miss, S_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, C1_Load_L1miss, S_F1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, Ifetch0_L1miss, Si_F0) {L1ITagArrayRead,L2TagArrayRead, L2DataArrayRead} {
+ l1im_profileMiss;
+ ai_allocateL1I;
+ fi_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S, Ifetch1_L1miss, Si_F1) {L1ITagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l1im_profileMiss;
+ ai_allocateL1I;
+ fi_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({S}, {C0_Store_L1hit, C0_Store_L1miss}, S_M0) {L1D0TagArrayRead, L2TagArrayRead}{
+ l2m_profileMiss;
+ l10m_profileMiss;
+ a0_allocateL1D;
+ i1_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition({S}, {C1_Store_L1hit, C1_Store_L1miss}, S_M1) {L1D1TagArrayRead,L2TagArrayRead} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ i0_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+  }
+
+  transition(Es, C0_Load_L1miss, Es_F0) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, C1_Load_L1miss, Es_F1) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} { // can this be folded with S_F?
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, Ifetch0_L1miss, S0) {L1ITagArrayRead, L2TagArrayRead} {
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, Ifetch1_L1miss, S1) {L1ITagArrayRead, L2TagArrayRead} {
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ ib_invBothClusters;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+  // THESE SHOULD NOT BE INSTANTANEOUS BUT OH WELL FOR NOW
+ transition(Es, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayWrite,L1D0TagArrayRead, L2TagArrayRead, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone; // instantaneous L1/L2 dirty - no writethrough delay
+ p_popMandatoryQueue;
+ }
+
+ transition(Es, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, C0_Load_L1miss, E0_F) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, C1_Load_L1miss, E0_Es) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, Ifetch0_L1miss, S0) {L2TagArrayRead, L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i0_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead } {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i0_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a0_allocateL1D;
+ s0_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(E0, C1_Store_L1miss, M1) {L1D0TagArrayRead, L1D0TagArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
+ a1_allocateL1D;
+ l11m_profileMiss;
+ i0_invCluster;
+ s1_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, C1_Load_L1miss, E1_F) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ a1_allocateL1D;
+ l11m_profileMiss;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, C0_Load_L1miss, E1_Es) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ a0_allocateL1D;
+ l10m_profileMiss;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, Ifetch1_L1miss, S1) {L2TagArrayRead, L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i1_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, Ifetch0_L1miss, S0) {L2TagArrayRead,L1ITagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue RdBlkS
+ l1im_profileMiss;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ i1_invCluster;
+ nS_issueRdBlkS;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a1_allocateL1D;
+ s1_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(E1, C0_Store_L1miss, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L2TagArrayRead, L2TagArrayWrite, L2DataArrayWrite} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({O}, {C0_Store_L1hit, C0_Store_L1miss}, O_M0) {L1D0TagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss; // permissions miss, still issue CtoD
+ l10m_profileMiss;
+ a0_allocateL1D;
+ i1_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition({O}, {C1_Store_L1hit, C1_Store_L1miss}, O_M1) {L1D1TagArrayRead, L2TagArrayRead} {
+    l2m_profileMiss; // permissions miss, still issue RdBlkM
+ l11m_profileMiss;
+ a1_allocateL1D;
+ i0_invCluster;
+ ii_invIcache;
+ nM_issueRdBlkM;
+ p_popMandatoryQueue;
+ }
+
+ transition(O, C0_Load_L1miss, O_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(O, C1_Load_L1miss, O_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, C0_Load_L1miss, Ms_F0) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, C1_Load_L1miss, Ms_F1) {L2TagArrayRead, L2DataArrayRead, L1D1TagArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms, M0, M1, O}, Ifetch0_L1miss, MO_S0) {L1ITagArrayRead, L2TagArrayRead} {
+ l2m_profileMiss; // permissions miss
+ l1im_profileMiss;
+ ai_allocateL1I;
+ t_allocateTBE;
+ ib_invBothClusters;
+ vd_victim;
+// i2_invL2;
+ p_popMandatoryQueue;
+ }
+
+  transition({Ms, M0, M1, O}, Ifetch1_L1miss, MO_S1) {L1ITagArrayRead, L2TagArrayRead} {
+    l2m_profileMiss; // permissions miss
+    l1im_profileMiss;
+ ai_allocateL1I;
+ t_allocateTBE;
+ ib_invBothClusters;
+ vd_victim;
+// i2_invL2;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(Ms, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D1TagArrayRead, L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(M0, C0_Load_L1miss, M0_F) {L1D0TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(M0, C1_Load_L1miss, M0_Ms) {L2TagArrayRead, L2DataArrayRead,L1D1TagArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(M0, {C0_Store_L1hit, C0_Store_L1miss}) {L1D0TagArrayRead, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayRead} {
+ a0_allocateL1D;
+ s0_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(M0, {C1_Store_L1hit, C1_Store_L1miss}, M1) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
+ a1_allocateL1D;
+ i0_invCluster;
+ s1_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(M1, C0_Load_L1miss, M1_Ms) {L2TagArrayRead, L2DataArrayRead, L1D0TagArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+  transition(M1, C1_Load_L1miss, M1_F) {L1D1TagArrayRead, L2TagArrayRead, L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(M1, {C0_Store_L1hit, C0_Store_L1miss}, M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayRead, L2DataArrayWrite, L2TagArrayWrite} {
+ a0_allocateL1D;
+ i1_invCluster;
+ s0_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ transition(M1, {C1_Store_L1hit, C1_Store_L1miss}) {L1D1TagArrayRead, L1D1DataArrayWrite, L2TagArrayRead, L2DataArrayWrite} {
+ a1_allocateL1D;
+ s1_storeDone;
+ p_popMandatoryQueue;
+ }
+
+ // end transitions from base
+
+ // Begin simple hit transitions
+ transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es,
+ Ms_F1, M0_Ms}, C0_Load_L1hit) {L1D0TagArrayRead, L1D0DataArrayRead} {
+ // track hits, if implemented
+ l0_loadDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es,
+ Ms_F0, M1_Ms}, C1_Load_L1hit) {L1D1TagArrayRead, L1D1DataArrayRead} {
+ // track hits, if implemented
+ l1_loadDone;
+ p_popMandatoryQueue;
+ }
+
+ transition({S, S_C, S_F0, S_F1, S_F}, Ifetch0_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
+ // track hits, if implemented
+ il0_loadDone;
+ p_popMandatoryQueue;
+ }
+
+  transition({S, S_C, S_F0, S_F1, S_F}, Ifetch1_L1hit) {L1ITagArrayRead, L1IDataArrayRead} {
+ // track hits, if implemented
+ il1_loadDone;
+ p_popMandatoryQueue;
+ }
+
+ // end simple hit transitions
+
+ // Transitions from transient states
+
+ // recycles
+ transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
+ IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
+ E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, C0_Load_L1hit) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M1,
+ O_M1, S0, S1, I_C, S0_C, S1_C, S_C}, C0_Load_L1miss) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
+ IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
+ E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, C1_Load_L1hit) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, Si_F0, Si_F1, S_M0,
+ O_M0, S0, S1, I_C, S0_C, S1_C, S_C}, C1_Load_L1miss) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, {Ifetch0_L1hit, Ifetch1_L1hit}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M0, I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES,
+ IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, ES_I, MO_I, S_F0, S_F1, S_F,
+ O_F0, O_F1, O_F, S_M0, S_M1, O_M0, O_M1, Es_F0, Es_F1, Es_F, E0_F,
+ E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, I_C,
+ S_C}, {Ifetch0_L1miss, Ifetch1_L1miss}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_E1S, IF_E1S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F1, O_F1,
+ Si_F0, Si_F1, S_M1, O_M1, S0, S1, Es_F1, E1_F, E0_Es, Ms_F1, M0_Ms,
+ M1_F, I_C, S0_C, S1_C, S_C}, {C0_Store_L1miss}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+  transition({I_E0S, IF_E0S, F_S0, F_S1, ES_I, MO_I, MO_S0, MO_S1, S_F0, O_F0,
+ Si_F0, Si_F1, S_M0, O_M0, S0, S1, Es_F0, E0_F, E1_Es, Ms_F0, M0_F,
+ M1_Ms, I_C, S0_C, S1_C, S_C}, {C1_Store_L1miss}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
+              IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1,
+              S_M0, O_M0, Es_F0, Es_F1, Es_F, E0_F, E0_Es, E1_Es, Ms_F0,
+              Ms_F1, Ms_F, M0_F, M0_Ms, M1_Ms}, {C0_Store_L1hit}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
+ IF0_ES, IF1_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, Si_F0, Si_F1, S_M1,
+ O_M1, Es_F0, Es_F1, Es_F, E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F,
+ M0_Ms, M1_F, M1_Ms}, {C1_Store_L1hit}) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M0, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_ES, IF_E0S, IF_ES,
+ IF0_ES, IF1_ES, S_F0, S_F, O_F0, O_F, S_M0, O_M0, Es_F0, Es_F, E0_F,
+ E1_Es, Ms_F0, Ms_F, M0_F, M1_Ms}, L1D0_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E1S, I_ES, IF_E1S, IF_ES,
+ IF0_ES, IF1_ES, S_F1, S_F, O_F1, O_F, S_M1, O_M1, Es_F1, Es_F, E1_F,
+ E0_Es, Ms_F1, Ms_F, M0_Ms, M1_F}, L1D1_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({F_S0, F_S1, MO_S0, MO_S1, Si_F0, Si_F1, S0, S1, S0_C, S1_C}, L1I_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({S_C, S0_C, S1_C, S0, S1, Si_F0, Si_F1, I_M0, I_M1, I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_E0S, I_E1S, I_ES, S_F0, S_F1, S_F, O_F0, O_F1, O_F, S_M0, O_M0, S_M1, O_M1, Es_F0, Es_F1, Es_F, E0_F, E1_F, E0_Es, E1_Es, Ms_F0, Ms_F1, Ms_F, M0_F, M0_Ms, M1_F, M1_Ms, MO_S0, MO_S1, IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, L2_Repl) {} {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES, F_S0, F_S1}, {NB_AckS,
+ PrbInvData, PrbInvDataDemand, PrbInv, PrbShrData, PrbShrDataDemand}) {} {
+    zz_recycleMandatoryQueue; // These could technically be resolved now (probes included), but doing so would need more states; they resolve soon on their own, so recycling is simpler.
+ }
+
+ transition({IF_E0S, IF_E1S, IF_ES, IF0_ES, IF1_ES}, NB_AckE) {} {
+    zz_recycleMandatoryQueue; // These could technically be resolved now (probes included), but doing so would need more states; they resolve soon on their own, so recycling is simpler.
+ }
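+
+  // Recycling requeues the head message after recycle_latency instead of
+  // blocking the port, so the request retries until the transient state
+  // resolves; this trades repeated wakeups for fewer transient states.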
+
+ transition({E0_Es, E1_F, Es_F1}, C0_Load_L1miss, Es_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S_F1, C0_Load_L1miss, S_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(O_F1, C0_Load_L1miss, O_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms_F1, M0_Ms, M1_F}, C0_Load_L1miss, Ms_F) {L2DataArrayRead} {
+ l10m_profileMiss;
+ a0_allocateL1D;
+ f0_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+  transition(I_M0, C1_Load_L1miss, I_M0Ms) {
+ l11m_profileMiss;
+ l2m_profileMiss;
+ a1_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+  transition(I_M1, C0_Load_L1miss, I_M1Ms) {
+ l10m_profileMiss;
+ l2m_profileMiss;
+ a0_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_M0, C1_Store_L1miss, I_M0M1) {
+ l11m_profileMiss;
+ l2m_profileMiss;
+ a1_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_M1, C0_Store_L1miss, I_M1M0) {L1D0TagArrayRead, L1D0TagArrayWrite, L2TagArrayRead, L2TagArrayWrite} {
+ l2m_profileMiss;
+ a0_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_E0S, C1_Load_L1miss, I_ES) {} {
+ l2m_profileMiss;
+ l11m_profileMiss;
+ a1_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition(I_E1S, C0_Load_L1miss, I_ES) {} {
+    l2m_profileMiss;
+    l10m_profileMiss;
+ a0_allocateL1D;
+ p_popMandatoryQueue;
+ }
+
+ transition({E1_Es, E0_F, Es_F0}, C1_Load_L1miss, Es_F) {L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(S_F0, C1_Load_L1miss, S_F) { L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition(O_F0, C1_Load_L1miss, O_F) {L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({Ms_F0, M1_Ms, M0_F}, C1_Load_L1miss, Ms_F) {L2DataArrayRead} {
+ l11m_profileMiss;
+ a1_allocateL1D;
+ f1_L2ToL1;
+ p_popMandatoryQueue;
+ }
+
+ transition({S, Es, E0, O, Ms, M0, O_F1, S_F1, Si_F0, Si_F1, Es_F1, E0_Es, Ms_F1, M0_Ms}, L1D0_Repl) {L1D0TagArrayRead} {
+ i0_invCluster;
+ }
+
+ transition({S, Es, E1, O, Ms, M1, O_F0, S_F0, Si_F0, Si_F1, Es_F0, E1_Es, Ms_F0, M1_Ms}, L1D1_Repl) {L1D1TagArrayRead} {
+ i1_invCluster;
+ }
+
+ transition({S, S_C, S_F0, S_F1}, L1I_Repl) {L1ITagArrayRead} {
+ ii_invIcache;
+ }
+
+ transition({S, E0, E1, Es}, L2_Repl, ES_I) {L2TagArrayRead,L1D0TagArrayRead, L1D1TagArrayRead, L1ITagArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ t_allocateTBE;
+ vc_victim;
+ ib_invBothClusters;
+ i2_invL2;
+ ii_invIcache;
+ }
+
+ transition({Ms, M0, M1, O}, L2_Repl, MO_I) {L2TagArrayRead, L2TagArrayWrite, L1D0TagArrayRead, L1D1TagArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ t_allocateTBE;
+ vd_victim;
+ i2_invL2;
+ ib_invBothClusters; // nothing will happen for D0 on M1, vice versa
+ }
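+
+  // L2 replacements: clean states (S/E0/E1/Es) victimize with VicClean
+  // and wait in ES_I, dirty states (Ms/M0/M1/O) with VicDirty in MO_I.
+  // Either way all L1 copies are invalidated, the CPUs are notified, and
+  // the writeback completes on NB_AckWB below.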
+
+ transition(S0, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ wi_writeIcache;
+ xi0_loadDone;
+ uu_sendUnblock;
+ sdv_sendDoneValid;
+ pr_popResponseQueue;
+ }
+
+ transition(S1, NB_AckS, S) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ wi_writeIcache;
+ xi1_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  transition(S0_C, NB_AckS, S_C) {L1IDataArrayWrite, L2DataArrayWrite} {
+    // does not need a done ack since the RdBlkS was sinked
+ wi_writeIcache;
+ xi0_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  transition(S1_C, NB_AckS, S_C) {L1IDataArrayWrite, L2DataArrayWrite} {
+ wi_writeIcache;
+ xi1_loadDone;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M0, NB_AckM, M0) { L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xs0_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
+ w1_writeDcache;
+ xs1_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+  // These M0->M1 transfers should not be instantaneous, but oh well for now.
+ transition(I_M0M1, NB_AckM, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xs0_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ i0_invCluster;
+ s1_storeDone;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M1M0, NB_AckM, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite,L2DataArrayWrite, L2TagArrayWrite} {
+ w1_writeDcache;
+ xs1_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ i1_invCluster;
+ s0_storeDone;
+ pr_popResponseQueue;
+ }
+
+  // The above should be more like this, which has some latency to transfer to L1
+ transition(I_M0Ms, NB_AckM, M0_Ms) {L1D0DataArrayWrite,L2DataArrayWrite} {
+ w0_writeDcache;
+ xs0_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ f1_L2ToL1;
+ pr_popResponseQueue;
+ }
+
+ transition(I_M1Ms, NB_AckM, M1_Ms) {L1D1DataArrayWrite,L2DataArrayWrite} {
+ w1_writeDcache;
+ xs1_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ f0_L2ToL1;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E0S, NB_AckE, E0) {L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xl0_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E1S, NB_AckE, E1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w1_writeDcache;
+ xl1_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_ES, NB_AckE, Es) {L1D1DataArrayWrite, L1D1TagArrayWrite, L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite } {
+ w0_writeDcache;
+ xl0_loadDone;
+ w1_writeDcache;
+ xl1_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E0S, NB_AckS, S) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ w0_writeDcache;
+ xl0_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_E1S, NB_AckS, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
+ w1_writeDcache;
+ xl1_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(I_ES, NB_AckS, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayWrite} {
+ w0_writeDcache;
+ xl0_loadDone;
+ w1_writeDcache;
+ xl1_loadDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(S_F0, L2_to_L1D0, S) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_F1, L2_to_L1D1, S) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Si_F0, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ ci_copyL2ToL1;
+ il0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Si_F1, L2_to_L1I, S) {L1ITagArrayWrite, L1IDataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ ci_copyL2ToL1;
+ il1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_F, L2_to_L1D0, S_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_F, L2_to_L1D1, S_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F0, L2_to_L1D0, O) { L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F1, L2_to_L1D1, O) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F, L2_to_L1D0, O_F1) { L1D0DataArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(O_F, L2_to_L1D1, O_F0) { L1D1DataArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M1_F, L2_to_L1D1, M1) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M0_F, L2_to_L1D0, M0) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Ms_F0, L2_to_L1D0, Ms) {L1D0DataArrayWrite, L1D0TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Ms_F1, L2_to_L1D1, Ms) {L1D1DataArrayWrite, L1D1TagArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Ms_F, L2_to_L1D0, Ms_F1) {L1D0DataArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+  transition(Ms_F, L2_to_L1D1, Ms_F0) {L1D1DataArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M1_Ms, L2_to_L1D0, Ms) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(M0_Ms, L2_to_L1D1, Ms) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F0, L2_to_L1D0, Es) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F1, L2_to_L1D1, Es) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2TagArrayWrite, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F, L2_to_L1D0, Es_F1) {L2TagArrayRead, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(Es_F, L2_to_L1D1, Es_F0) {L2TagArrayRead, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E0_F, L2_to_L1D0, E0) {L2TagArrayRead, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E1_F, L2_to_L1D1, E1) {L2TagArrayRead, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E1_Es, L2_to_L1D0, Es) {L2TagArrayRead, L2DataArrayRead} {
+ c0_copyL2ToL1;
+ l0_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(E0_Es, L2_to_L1D1, Es) {L2TagArrayRead, L2DataArrayRead} {
+ c1_copyL2ToL1;
+ l1_loadDone;
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_E0S, L2_to_L1D0, I_E0S) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_E1S, L2_to_L1D1, I_E1S) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_ES, L2_to_L1D0, IF1_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF_ES, L2_to_L1D1, IF0_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF0_ES, L2_to_L1D0, I_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(IF1_ES, L2_to_L1D1, I_ES) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(F_S0, L2_to_L1I, S0) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(F_S1, L2_to_L1I, S1) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition({S_M0, O_M0}, NB_AckM, M0) {L1D0TagArrayWrite, L1D0DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ xs0_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition({S_M1, O_M1}, NB_AckM, M1) {L1D1TagArrayWrite, L1D1DataArrayWrite, L2DataArrayWrite, L2TagArrayWrite} {
+ xs1_storeDone;
+ sdv_sendDoneValid;
+ uu_sendUnblock;
+ pr_popResponseQueue;
+ }
+
+ transition(MO_I, NB_AckWB, I) {L2TagArrayWrite} {
+ wb_data;
+ ra_sendReplAck;
+ sdi_sendDoneInvalid;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(ES_I, NB_AckWB, I) {L2TagArrayWrite} {
+ wb_data;
+ ra_sendReplAck;
+ sdi_sendDoneInvalid;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(MO_S0, NB_AckWB, S0) {L2TagArrayWrite} {
+ wb_data;
+ i2_invL2;
+ a2_allocateL2;
+ sdv_sendDoneValid;
+ nS_issueRdBlkS;
+ d_deallocateTBE; // FOO
+ pr_popResponseQueue;
+ }
+
+ transition(MO_S1, NB_AckWB, S1) {L2TagArrayWrite} {
+ wb_data;
+ i2_invL2;
+ a2_allocateL2;
+ sdv_sendDoneValid;
+ nS_issueRdBlkS;
+ d_deallocateTBE; // FOO
+ pr_popResponseQueue;
+ }
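+
+  // An ifetch to a locally dirty line (MO_S0/MO_S1) victimizes the dirty
+  // data first; once the writeback is acked, the line is re-fetched
+  // shared, hence wb_data and nS_issueRdBlkS in the same transition.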
+
+ // Writeback cancel "ack"
+ transition(I_C, NB_AckWB, I) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ sdi_sendDoneInvalid;
+ d_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(S0_C, NB_AckWB, S0) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ sdv_sendDoneValid;
+ pr_popResponseQueue;
+ }
+
+ transition(S1_C, NB_AckWB, S1) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ sdv_sendDoneValid;
+ pr_popResponseQueue;
+ }
+
+ transition(S_C, NB_AckWB, S) {L2TagArrayWrite} {
+ ss_sendStaleNotification;
+ sdv_sendDoneValid;
+ pr_popResponseQueue;
+ }
+
+ // Begin Probe Transitions
+
+ transition({Ms, M0, M1, O}, {PrbInvData, PrbInvDataDemand}, I) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ i2_invL2;
+ ib_invBothClusters;
+ pp_popProbeQueue;
+ }
+
+ transition({Es, E0, E1, S, I}, {PrbInvData, PrbInvDataDemand}, I) {L2TagArrayRead, L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ ib_invBothClusters;
+ ii_invIcache; // only relevant for S
+ pp_popProbeQueue;
+ }
+
+ transition(S_C, {PrbInvData, PrbInvDataDemand}, I_C) {L2TagArrayWrite} {
+ t_allocateTBE;
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, {PrbInvData, PrbInvDataDemand}, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms, M0, M1, O, Es, E0, E1, S, I}, PrbInv, I) {L2TagArrayRead, L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2; // nothing will happen in I
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(S_C, PrbInv, I_C) {L2TagArrayWrite} {
+ t_allocateTBE;
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(I_C, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms, M0, M1, O}, {PrbShrData, PrbShrDataDemand}, O) {L2TagArrayRead, L2TagArrayWrite, L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({Es, E0, E1, S}, {PrbShrData, PrbShrDataDemand}, S) {L2TagArrayRead, L2TagArrayWrite} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(S_C, {PrbShrData, PrbShrDataDemand}) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({I, I_C}, {PrbShrData, PrbShrDataDemand}) {L2TagArrayRead} {
+ pb_sendProbeResponseBackprobe;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M0, I_E0S}, {PrbInv, PrbInvData, PrbInvDataDemand}) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters; // must invalidate current data (only relevant for I_M0)
+ a0_allocateL1D; // but make sure there is room for incoming data when it arrives
+ pp_popProbeQueue;
+ }
+
+ transition({I_M1, I_E1S}, {PrbInv, PrbInvData, PrbInvDataDemand}) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters; // must invalidate current data (only relevant for I_M1)
+ a1_allocateL1D; // but make sure there is room for incoming data when it arrives
+ pp_popProbeQueue;
+ }
+
+ transition({I_M0M1, I_M1M0, I_M0Ms, I_M1Ms, I_ES}, {PrbInv, PrbInvData, PrbInvDataDemand, PrbShrData, PrbShrDataDemand}) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ a0_allocateL1D;
+ a1_allocateL1D;
+ pp_popProbeQueue;
+ }
+
+ transition({I_M0, I_E0S, I_M1, I_E1S}, {PrbShrData, PrbShrDataDemand}) {} {
+ pb_sendProbeResponseBackprobe;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, {PrbInvData, PrbInvDataDemand}, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, {PrbInvData, PrbInvDataDemand}, I_C) {} {
+ pdt_sendProbeResponseDataFromTBE;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, PrbInv, I_C) {} {
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ pp_popProbeQueue;
+ }
+
+ transition(ES_I, {PrbShrData, PrbShrDataDemand}, ES_I) {} {
+ ph_sendProbeResponseHit;
+ s_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_I, {PrbShrData, PrbShrDataDemand}, MO_I) {} {
+ pdt_sendProbeResponseDataFromTBE;
+ s_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S0, {PrbInvData, PrbInvDataDemand}, S0_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdt_sendProbeResponseDataFromTBE;
+ i2_invL2;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ d_deallocateTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S1, {PrbInvData, PrbInvDataDemand}, S1_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdt_sendProbeResponseDataFromTBE;
+ i2_invL2;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ d_deallocateTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S0, PrbInv, S0_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ d_deallocateTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(MO_S1, PrbInv, S1_C) {L2TagArrayWrite} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ i2_invL2;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ d_deallocateTBE;
+ pp_popProbeQueue;
+ }
+
+ transition({MO_S0, MO_S1}, {PrbShrData, PrbShrDataDemand}) {} {
+ pdt_sendProbeResponseDataFromTBE;
+ s_setSharedFlip;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F0, Es_F0, E0_F, E1_Es}, {PrbInvData, PrbInvDataDemand, PrbInv}, IF_E0S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ // invalidate everything you've got
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ // but make sure you have room for what you need from the fill
+ a0_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F1, Es_F1, E1_F, E0_Es}, {PrbInvData, PrbInvDataDemand, PrbInv}, IF_E1S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ // invalidate everything you've got
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ // but make sure you have room for what you need from the fill
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F, Es_F}, {PrbInvData, PrbInvDataDemand, PrbInv}, IF_ES) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ // invalidate everything you've got
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ // but make sure you have room for what you need from the fill
+ a0_allocateL1D;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition(Si_F0, {PrbInvData, PrbInvDataDemand, PrbInv}, F_S0) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition(Si_F1, {PrbInvData, PrbInvDataDemand, PrbInv}, F_S1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ nS_issueRdBlkS;
+ pp_popProbeQueue;
+ }
+
+ transition({Es_F0, E0_F, E1_Es}, {PrbShrData, PrbShrDataDemand}, S_F0) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({Es_F1, E1_F, E0_Es}, {PrbShrData, PrbShrDataDemand}, S_F1) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(Es_F, {PrbShrData, PrbShrDataDemand}, S_F) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({S_F0, S_F1, S_F, Si_F0, Si_F1}, {PrbShrData, PrbShrDataDemand}) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition(S_M0, {PrbInvData, PrbInvDataDemand}, I_M0) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition(O_M0, {PrbInvData, PrbInvDataDemand}, I_M0) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdm_sendProbeResponseDataMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M0, O_M0}, {PrbInv}, I_M0) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition(S_M1, {PrbInvData, PrbInvDataDemand}, I_M1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition(O_M1, {PrbInvData, PrbInvDataDemand}, I_M1) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pdm_sendProbeResponseDataMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M1, O_M1}, {PrbInv}, I_M1) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pim_sendProbeResponseInvMs;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S0, S0_C}, {PrbInvData, PrbInvDataDemand, PrbInv}) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S1, S1_C}, {PrbInvData, PrbInvDataDemand, PrbInv}) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ ii_invIcache;
+ i2_invL2;
+ ai_allocateL1I;
+ a2_allocateL2;
+ pp_popProbeQueue;
+ }
+
+ transition({S_M0, S_M1}, {PrbShrData, PrbShrDataDemand}) {} {
+ ph_sendProbeResponseHit;
+ pp_popProbeQueue;
+ }
+
+ transition({O_M0, O_M1}, {PrbShrData, PrbShrDataDemand}) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({S0, S1, S0_C, S1_C}, {PrbShrData, PrbShrDataDemand}) {} {
+ pb_sendProbeResponseBackprobe;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F0, M0_F, M1_Ms, O_F0}, {PrbInvData, PrbInvDataDemand}, IF_E0S) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F1, M1_F, M0_Ms, O_F1}, {PrbInvData, PrbInvDataDemand}, IF_E1S) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ ib_invBothClusters;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F, O_F}, {PrbInvData, PrbInvDataDemand}, IF_ES) {L2DataArrayRead} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pd_sendProbeResponseData;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F0, M0_F, M1_Ms, O_F0}, PrbInv, IF_E0S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F1, M1_F, M0_Ms, O_F1}, PrbInv, IF_E1S) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ i2_invL2;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F, O_F}, PrbInv, IF_ES) {} {
+ forward_eviction_to_cpu0;
+ forward_eviction_to_cpu1;
+ pi_sendProbeResponseInv;
+ ib_invBothClusters;
+ i2_invL2;
+ a0_allocateL1D;
+ a1_allocateL1D;
+ a2_allocateL2;
+ n_issueRdBlk;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F0, M0_F, M1_Ms}, {PrbShrData, PrbShrDataDemand}, O_F0) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F1, M1_F, M0_Ms}, {PrbShrData, PrbShrDataDemand}, O_F1) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({Ms_F}, {PrbShrData, PrbShrDataDemand}, O_F) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ transition({O_F0, O_F1, O_F}, {PrbShrData, PrbShrDataDemand}) {L2DataArrayRead} {
+ pd_sendProbeResponseData;
+ pp_popProbeQueue;
+ }
+
+ // END TRANSITIONS
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:Directory, "AMD_Base-like protocol")
+: DirectoryMemory * directory;
+ CacheMemory * L3CacheMemory;
+ Cycles response_latency := 5;
+ Cycles response_latency_regionDir := 1;
+ Cycles l3_hit_latency := 30;
+ bool useL3OnWT := "False";
+ Cycles to_memory_controller_latency := 1;
+
+ // From the Cores
+ MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response";
+ MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock";
+
+ // To the Cores
+ MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response";
+
+ // From region buffer
+ MessageBuffer * reqFromRegBuf, network="From", virtual_network="7", vnet_type="request";
+
+ // To Region directory
+ MessageBuffer * reqToRegDir, network="To", virtual_network="5", vnet_type="request";
+ MessageBuffer * reqFromRegDir, network="From", virtual_network="5", vnet_type="request";
+ MessageBuffer * unblockToRegDir, network="To", virtual_network="4", vnet_type="unblock";
+
+ MessageBuffer * triggerQueue;
+ MessageBuffer * L3triggerQueue;
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_U") {
+ U, AccessPermission:Backing_Store, desc="unblocked";
+ BR, AccessPermission:Backing_Store, desc="got CPU read request, blocked while sent to L3";
+ BW, AccessPermission:Backing_Store, desc="got CPU write request, blocked while sent to L3";
+ BL, AccessPermission:Busy, desc="got L3 WB request";
+ // BL is Busy because the data may exist only in an in-flight writeback:
+ // the L3 may already have sent it and moved on, possibly to state I.
+ BI, AccessPermission:Backing_Store, desc="Blocked waiting for inv ack from core";
+ BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
+ BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B, AccessPermission:Backing_Store, desc="sent response, Blocked til ack";
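+ // Naming note (per the descs above): "_PM" states still await both probes
+ // and memory, while "_Pm" states already hold the memory data and wait
+ // only on probe acks.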
+
+ // These states are needed when a private request was issued before an inv was received
+ // for writebacks
+ BS_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BM_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BP_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
+ // for reads
+ BS_Pm_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BM_Pm_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B_Pm_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BP_B, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ // CPU requests
+ RdBlkS, desc="...";
+ RdBlkM, desc="...";
+ RdBlk, desc="...";
+ WriteThrough, desc="WriteThrough Message";
+ Atomic, desc="Atomic Message";
+
+ RdBlkSP, desc="...";
+ RdBlkMP, desc="...";
+ RdBlkP, desc="...";
+ VicDirtyP, desc="...";
+ VicCleanP, desc="...";
+ WriteThroughP, desc="WriteThrough Message";
+ AtomicP, desc="Atomic Message";
+
+ // writebacks
+ VicDirty, desc="...";
+ VicClean, desc="...";
+ CPUData, desc="WB data from CPU";
+ StaleWB, desc="WB response for a no longer valid request";
+
+ // probe responses
+ CPUPrbResp, desc="Probe Response Msg";
+ LastCPUPrbResp, desc="Last Probe Response Msg";
+
+ ProbeAcksComplete, desc="Probe Acks Complete";
+
+ L3Hit, desc="Hit in L3 return data to core";
+
+ // Memory Controller
+ MemData, desc="Fetched data from memory arrives";
+ WBAck, desc="Writeback Ack from memory arrives";
+
+ CoreUnblock, desc="Core received data, unblock";
+ UnblockWriteThrough, desc="unblock, self triggered";
+
+ StaleVicDirty, desc="Core invalidated before VicDirty processed";
+ StaleVicDirtyP, desc="Core invalidated before VicDirty processed";
+
+ // For region protocol
+ CPUReq, desc="Generic CPU request";
+ Inv, desc="Region dir needs a block invalidated";
+ Downgrade, desc="Region dir needs a block downgraded";
+
+ // For private accesses (bypassed reg-dir)
+ CPUReadP, desc="Initial req from core, sent to L3";
+ CPUWriteP, desc="Initial req from core, sent to L3";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ L3DataArrayRead, desc="Read the data array";
+ L3DataArrayWrite, desc="Write the data array";
+ L3TagArrayRead, desc="Read the tag array";
+ L3TagArrayWrite, desc="Write the tag array";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore";
+ }
+
+ structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
+ DataBlock DataBlk, desc="data for the block";
+ MachineID LastSender, desc="Mach which this block came from";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ DataBlock DataBlkAux, desc="Auxiliary data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ int NumPendingAcks, desc="num acks expected";
+ MachineID OriginalRequestor, desc="Original Requestor";
+ MachineID WTRequestor, desc="WT Requestor";
+ bool Cached, desc="data hit in Cache";
+ bool MemData, desc="Got MemData?",default="false";
+ bool wtData, desc="Got write through data?",default="false";
+ bool atomicData, desc="Got Atomic op?",default="false";
+ Cycles InitialRequestTime, desc="...";
+ Cycles ForwardRequestTime, desc="...";
+ Cycles ProbeRequestStartTime, desc="...";
+ bool DemandRequest, desc="for profiling";
+ MachineID LastSender, desc="Mach which this block came from";
+ bool L3Hit, default="false", desc="Was this an L3 hit?";
+ bool TriggeredAcksComplete, default="false", desc="True if already triggered acks complete";
+ WriteMask writeMask, desc="outstanding write through mask";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+
+ if (is_valid(dir_entry)) {
+ //DPRINTF(RubySlicc, "Getting entry %s: %s\n", addr, dir_entry.DataBlk);
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if (is_valid(tbe) && tbe.MemData) {
+ DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
+ return tbe.DataBlk;
+ }
+ DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
+ return getDirectoryEntry(addr).DataBlk;
+ }
+
+ State getState(TBE tbe, CacheEntry entry, Addr addr) {
+ return getDirectoryEntry(addr).DirectoryState;
+ }
+
+ State getStateFromAddr(Addr addr) {
+ return getDirectoryEntry(addr).DirectoryState;
+ }
+
+ void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
+ getDirectoryEntry(addr).DirectoryState := state;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ // For this Directory, permissions are tracked solely in the directory:
+ // a block cannot be in a TBE without also being in the Dir, so all state
+ // is kept in one place.
+ if(directory.isPresent(addr)) {
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ void setAccessPermission(CacheEntry entry, Addr addr, State state) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L3DataArrayRead) {
+ L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L3DataArrayWrite) {
+ L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L3TagArrayRead) {
+ L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L3TagArrayWrite) {
+ L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L3DataArrayRead) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L3DataArrayWrite) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L3TagArrayRead) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L3TagArrayWrite) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
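+ // These RequestTypes mirror the {L3TagArray*/L3DataArray*} annotations on
+ // transitions: recordRequestType feeds the cache access stats, and
+ // checkResourceAvailable stalls a transition until the named array is free.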
+
+ // ** OUT_PORTS **
+ out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
+ out_port(responseNetwork_out, ResponseMsg, responseToCore);
+
+ out_port(requestNetworkReg_out, CPURequestMsg, reqToRegDir);
+ out_port(regAckNetwork_out, UnblockMsg, unblockToRegDir);
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+ out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
+
+ // ** IN_PORTS **
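+ // Note: rank orders port service; the higher-rank internal queues below
+ // (triggers, unblocks, responses) are polled ahead of fresh core requests
+ // (rank=0), so in-flight transactions complete before new work is accepted.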
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=7) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == TriggerType:AcksComplete) {
+ trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
+ trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
+ } else {
+ error("Unknown trigger msg");
+ }
+ }
+ }
+ }
+
+ in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=6) {
+ if (L3TriggerQueue_in.isReady(clockEdge())) {
+ peek(L3TriggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == TriggerType:L3Hit) {
+ trigger(Event:L3Hit, in_msg.addr, entry, tbe);
+ } else {
+ error("Unknown trigger msg");
+ }
+ }
+ }
+ }
+
+ // Unblock Network
+ in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=5) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, UnblockMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
+ }
+ }
+ }
+
+ // Core response network
+ in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=4) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ DPRINTF(RubySlicc, "core responses %s\n", in_msg);
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
+ if (is_valid(tbe) && tbe.NumPendingAcks == 1
+ && tbe.TriggeredAcksComplete == false) {
+ trigger(Event:LastCPUPrbResp, in_msg.addr, entry, tbe);
+ } else {
+ trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:CPUData) {
+ trigger(Event:CPUData, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
+ trigger(Event:StaleWB, in_msg.addr, entry, tbe);
+ } else {
+ error("Unexpected response type");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=3) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:MemData, in_msg.addr, entry, tbe);
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(regBuf_in, CPURequestMsg, reqFromRegBuf, rank=2) {
+ if (regBuf_in.isReady(clockEdge())) {
+ peek(regBuf_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == CoherenceRequestType:ForceInv) {
+ trigger(Event:Inv, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:ForceDowngrade) {
+ trigger(Event:Downgrade, in_msg.addr, entry, tbe);
+ } else {
+ error("Bad request from region buffer");
+ }
+ }
+ }
+ }
+
+ in_port(regDir_in, CPURequestMsg, reqFromRegDir, rank=1) {
+ if (regDir_in.isReady(clockEdge())) {
+ peek(regDir_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:Atomic) {
+ trigger(Event:Atomic, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
+ } else {
+ trigger(Event:VicDirty, in_msg.addr, entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
+ } else {
+ trigger(Event:VicClean, in_msg.addr, entry, tbe);
+ }
+ } else {
+ error("Bad message type fwded from Region Dir");
+ }
+ }
+ }
+ }
+
+ in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Private) {
+ // Bypass the region dir
+ if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlkP, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkSP, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkMP, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:Atomic) {
+ trigger(Event:AtomicP, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ trigger(Event:WriteThroughP, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicDirtyP for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirtyP, in_msg.addr, entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
+ trigger(Event:VicDirtyP, in_msg.addr, entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicCleanP for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirtyP, in_msg.addr, entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
+ trigger(Event:VicCleanP, in_msg.addr, entry, tbe);
+ }
+ } else {
+ error("Bad message type for private access");
+ }
+ } else {
+ trigger(Event:CPUReq, in_msg.addr, entry, tbe);
+ }
+ }
+ }
+ }
+
+ // Actions
+ action(s_sendResponseS, "s", desc="send Shared response") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := tbe.DemandRequest;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Cached) {
+ out_msg.State := CoherenceState:Shared;
+ } else {
+ out_msg.State := CoherenceState:Exclusive;
+ }
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := tbe.DemandRequest;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(m_sendResponseM, "m", desc="send Modified response") {
+ if (tbe.wtData) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+ } else {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := false;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := tbe.DemandRequest;
+ out_msg.L3Hit := tbe.L3Hit;
+ if (tbe.atomicData) {
+ out_msg.WTRequestor := tbe.WTRequestor;
+ }
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ if (tbe.atomicData) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+ }
+ }
+ }
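+ // Note: for write-through data (tbe.wtData) no data response is sent here;
+ // the requestor is acked separately with NBSysWBAck, so this action only
+ // self-triggers UnblockWriteThrough to release the block.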
+
+ action(sb_sendResponseSBypass, "sb", desc="send Shared response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := false;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(esb_sendResponseESBypass, "esb", desc="send Exclusive or Shared response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Cached || in_msg.ForceShared) {
+ out_msg.State := CoherenceState:Shared;
+ } else {
+ out_msg.State := CoherenceState:Exclusive;
+ }
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := false;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(mbwt_sendResponseWriteThroughBypass, "mbwt", desc="send write through response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.DemandRequest := false;
+ }
+ } else {
+ assert(in_msg.Type == CoherenceRequestType:Atomic);
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := false;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := false;
+ out_msg.L3Hit := tbe.L3Hit;
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+ }
+ }
+
+ action(mb_sendResponseMBypass, "mb", desc="send Modified response") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := false;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.DemandRequest := false;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := true;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.DemandRequest := tbe.DemandRequest;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(cp_sendResponseCtoDP, "cp", desc="send CtoD Ack") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := true;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.DemandRequest := false;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(regDir_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.DemandRequest := false;
+ }
+ }
+ }
+
+ action(wp_sendResponseWBAckP, "wp", desc="send WB Ack") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ out_msg.DemandRequest := false;
+ }
+ }
+ }
+
+ action(wc_sendResponseWBAck, "wc", desc="send WB Ack for cancel") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Sender);
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(ra_ackRegionDir, "ra", desc="Ack region dir") {
+ peek(regDir_in, CPURequestMsg) {
+ if (in_msg.NoAckNeeded == false) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency_regionDir) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DirReadyAck;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+ }
+
+ action(l_queueMemRdReq, "lr", desc="Read data from memory") {
+ peek(regDir_in, CPURequestMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ tbe.DataBlk := entry.DataBlk;
+ tbe.LastSender := entry.LastSender;
+ tbe.L3Hit := true;
+ tbe.MemData := true;
+ DPRINTF(RubySlicc, "L3 data is %s\n", entry.DataBlk);
+ L3CacheMemory.deallocate(address);
+ } else {
+ queueMemoryRead(machineID, address, to_memory_controller_latency);
+ }
+ }
+ }
+
+ action(lrp_queueMemRdReqP, "lrp", desc="Read data from memory") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ tbe.DataBlk := entry.DataBlk;
+ tbe.LastSender := entry.LastSender;
+ tbe.L3Hit := true;
+ tbe.MemData := true;
+ DPRINTF(RubySlicc, "L3 data is %s\n", entry.DataBlk);
+ L3CacheMemory.deallocate(address);
+ } else {
+ queueMemoryRead(machineID, address, to_memory_controller_latency);
+ }
+ }
+ }
+
+ action(dcr_probeInvCoreData, "dcr", desc="probe inv cores, return data") {
+ peek(regBuf_in, CPURequestMsg) {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination := in_msg.Sharers;
+ tbe.NumPendingAcks := tbe.NumPendingAcks + in_msg.Sharers.count();
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ APPEND_TRANSITION_COMMENT(" dcr: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(ddr_probeDownCoreData, "ddr", desc="probe downgrade cores, return data") {
+ peek(regBuf_in, CPURequestMsg) {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination := in_msg.Sharers;
+ tbe.NumPendingAcks := tbe.NumPendingAcks + in_msg.Sharers.count();
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ APPEND_TRANSITION_COMMENT(" dcr: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
+ peek(requestNetwork_in, CPURequestMsg) { // not the right network?
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+ tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:CorePair) - 1;
+ out_msg.Destination.broadcast(MachineType:TCP);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:TCP);
+ out_msg.Destination.broadcast(MachineType:SQC);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:SQC);
+ out_msg.Destination.remove(in_msg.Requestor);
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
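+ // The ack count above mirrors the probe fan-out: every CorePair except the
+ // requestor (hence the -1 and Destination.remove), plus all TCPs and SQCs.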
+
+ action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
+ peek(requestNetwork_in, CPURequestMsg) { // not the right network?
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := false;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+ tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:CorePair) - 1;
+ out_msg.Destination.broadcast(MachineType:TCP);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:TCP);
+ out_msg.Destination.broadcast(MachineType:SQC);
+ tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:SQC);
+ out_msg.Destination.remove(in_msg.Requestor);
+ APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(d_writeDataToMemory, "d", desc="Write data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Writing Data: %s to address %s\n", in_msg.DataBlk,
+ in_msg.addr);
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ peek(regDir_in, CPURequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.wtData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ if (in_msg.Type == CoherenceRequestType:Atomic) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.atomicData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
+ tbe.Dirty := false;
+ }
+ tbe.OriginalRequestor := in_msg.Requestor;
+ tbe.NumPendingAcks := 0;
+ tbe.Cached := in_msg.ForceShared;
+ tbe.InitialRequestTime := in_msg.InitialRequestTime;
+ tbe.ForwardRequestTime := curCycle();
+ tbe.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ tbe.DemandRequest := in_msg.DemandRequest;
+ }
+ }
+
+ action(tp_allocateTBEP, "tp", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.wtData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ if (in_msg.Type == CoherenceRequestType:Atomic) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.atomicData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
+ tbe.Dirty := false;
+ }
+ tbe.OriginalRequestor := in_msg.Requestor;
+ tbe.NumPendingAcks := 0;
+ tbe.Cached := in_msg.ForceShared;
+ tbe.InitialRequestTime := in_msg.InitialRequestTime;
+ tbe.ForwardRequestTime := curCycle();
+ tbe.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
+ tbe.DemandRequest := false;
+ }
+ }
+
+ action(sa_setAcks, "sa", desc="setAcks") {
+ peek(regDir_in, CPURequestMsg) {
+ tbe.NumPendingAcks := in_msg.Acks;
+ APPEND_TRANSITION_COMMENT(" waiting for acks ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+ }
+
+ action(tr_allocateTBE, "tr", desc="allocate TBE Entry for Region inv") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.NumPendingAcks := 0;
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(wdp_writeBackDataPrivate, "wdp", desc="Write back data if needed") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.DataBlkAux := getDirectoryEntry(address).DataBlk;
+ tbe.DataBlkAux.copyPartial(in_msg.DataBlk,in_msg.writeMask);
+ getDirectoryEntry(address).DataBlk := tbe.DataBlkAux;
+ } else{
+ assert(in_msg.Type == CoherenceRequestType:Atomic);
+ tbe.DataBlkAux.atomicPartial(getDirectoryEntry(address).DataBlk,in_msg.writeMask);
+ getDirectoryEntry(address).DataBlk := tbe.DataBlkAux;
+ }
+ }
+ }
+
+ action(wd_writeBackData, "wd", desc="Write back data if needed") {
+ if (tbe.wtData) {
+ DataBlock tmp := getDirectoryEntry(address).DataBlk;
+ tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
+ tbe.DataBlk := tmp;
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ } else if (tbe.atomicData) {
+ tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask);
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ } else if (tbe.Dirty == true) {
+ APPEND_TRANSITION_COMMENT(" Wrote data back ");
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ }
+ }
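+ // wd_writeBackData covers three cases: write-through data merges into the
+ // directory copy via writeMask, atomics apply atomicPartial, and a plain
+ // dirty block is copied back whole.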
+
+ action(wdi_writeBackDataInv, "wdi", desc="Write back inv data if needed") {
+ // Unlike wd_writeBackData above, write back only when the block is dirty.
+ if (tbe.Dirty == true) {
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ APPEND_TRANSITION_COMMENT("Writing dirty data to dir");
+ DPRINTF(RubySlicc, "Data %s: %s\n", address, tbe.DataBlk);
+ } else {
+ APPEND_TRANSITION_COMMENT("NOT!!! Writing dirty data to dir");
+ }
+ }
+
+ action(wdt_writeBackDataInvNoTBE, "wdt", desc="Write back inv data if needed no TBE") {
+ // Same as wdi_writeBackDataInv, but takes the data from the in-flight message (no TBE).
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty == true) {
+ getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
+ APPEND_TRANSITION_COMMENT("Writing dirty data to dir");
+ DPRINTF(RubySlicc, "Data %s: %s\n", address, in_msg.DataBlk);
+ } else {
+ APPEND_TRANSITION_COMMENT("NOT!!! Writing dirty data to dir");
+ }
+ }
+ }
+
+ action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
+ peek(memQueue_in, MemoryMsg) {
+ if (tbe.Dirty == false) {
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk;
+ }
+ tbe.MemData := true;
+ }
+ }
+
+ action(ml_writeL3DataToTBE, "ml", desc="write L3 data to TBE") {
+ assert(tbe.Dirty == false);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ tbe.DataBlk := entry.DataBlk;
+ tbe.LastSender := entry.LastSender;
+ tbe.L3Hit := true;
+ tbe.MemData := true;
+ }
+
+ action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
+ DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
+ if (tbe.wtData) {
+ DataBlock tmp := in_msg.DataBlk;
+ tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
+ tbe.DataBlk := tmp;
+ } else if (tbe.Dirty) {
+ if(tbe.atomicData == false && tbe.wtData == false) {
+ DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
+ assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
+ }
+ } else {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ tbe.LastSender := in_msg.Sender;
+ }
+ }
+ if (in_msg.Hit) {
+ tbe.Cached := true;
+ }
+ }
+ }
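+ // A second dirty response for the same line is only legal when it carries
+ // identical data; the assert above enforces that ("double data" case).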
+
+ action(yc_writeCPUDataToTBE, "yc", desc="write CPU Data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
+ DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
+ if (tbe.Dirty) {
+ DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
+ assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
+ }
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := false;
+ tbe.LastSender := in_msg.Sender;
+ }
+ }
+ }
+
+ action(x_decrementAcks, "x", desc="decrement Acks pending") {
+ if (tbe.NumPendingAcks > 0) {
+ tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
+ } else {
+ APPEND_TRANSITION_COMMENT(" Double ack! ");
+ }
+ assert(tbe.NumPendingAcks >= 0);
+ APPEND_TRANSITION_COMMENT(" Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
+ action(o_checkForCompletion, "o", desc="check for ack completion") {
+ if (tbe.NumPendingAcks == 0 && tbe.TriggeredAcksComplete == false) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ tbe.TriggeredAcksComplete := true;
+ }
+ APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
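+ // Ack flow: the probe actions set NumPendingAcks, x_decrementAcks counts
+ // responses down, and this action fires a single AcksComplete trigger,
+ // guarded by TriggeredAcksComplete so it cannot fire twice.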
+
+ action(ont_checkForCompletionNoTrigger, "ont", desc="check for ack completion, no trigger") {
+ if (tbe.NumPendingAcks == 0 && tbe.TriggeredAcksComplete == false) {
+ tbe.TriggeredAcksComplete := true;
+ }
+ APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
+ action(rvp_removeVicDirtyIgnore, "rvp", desc="Remove ignored core") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
+ }
+ }
+
+ action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
+ peek(regDir_in, CPURequestMsg) {
+ getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
+ }
+ }
+
+ action(r_sendRequestToRegionDir, "r", desc="send request to Region Directory") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetworkReg_out, CPURequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ DPRINTF(RubySlicc, "out dest: %s\n", mapAddressToMachine(address, MachineType:RegionDir));
+ }
+ }
+ }
+
+ action(ai_ackInvalidate, "ai", desc="Ack to let the reg-dir know that the inv is ordered") {
+ peek(regBuf_in, CPURequestMsg) {
+ enqueue(regAckNetwork_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "ai out_msg: %s\n", out_msg);
+ }
+ }
+ }
+
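+  // Note: the ack destination is inferred from the sender's machine type.
+  // This assumes exactly two region buffers: instance 0 fronting the
+  // CorePair (CPU side) and instance 1 fronting the GPU side.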
+ action(aic_ackInvalidate, "aic", desc="Ack to let the reg-dir know that the inv is ordered") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.NoAckNeeded == false) {
+ enqueue(regAckNetwork_out, UnblockMsg, 1) {
+ out_msg.addr := address;
+ if (machineIDToMachineType(in_msg.Sender) == MachineType:CorePair) {
+ out_msg.Destination.add(createMachineID(MachineType:RegionBuffer, intToID(0)));
+ } else {
+ out_msg.Destination.add(createMachineID(MachineType:RegionBuffer, intToID(1)));
+ }
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "ai out_msg: %s\n", out_msg);
+ out_msg.wasValid := in_msg.isValid;
+ }
+ }
+ }
+ }
+
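+  // Common L3 fill pattern used by the al*/ali* actions below: on a tag
+  // miss with no way available, probe for a victim, queue the victim's
+  // data to memory, deallocate it, and only then allocate the new entry.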
+ action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := in_msg.DataBlk;
+ entry.LastSender := in_msg.Sender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := in_msg.DataBlk;
+ entry.LastSender := in_msg.Sender;
+ }
+ }
+ }
+
+ action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
+ if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ }
+ }
+ }
+
+ action(ali_allocateL3Block, "ali", desc="allocate the L3 block on ForceInv") {
+ if (tbe.Dirty == true) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ }
+ }
+ }
+
+ action(ali_allocateL3BlockNoTBE, "alt", desc="allocate the L3 block on ForceInv no TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" ali wrote data to L3 (hit) ");
+ entry.DataBlk := in_msg.DataBlk;
+ entry.LastSender := in_msg.Sender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" ali wrote data to L3 ");
+ entry.DataBlk := in_msg.DataBlk;
+ entry.LastSender := in_msg.Sender;
+ }
+ }
+ }
+ }
+
+ action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
+ L3CacheMemory.deallocate(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+  action(prd_popRegionQueue, "prd", desc="pop region dir request queue") {
+    regDir_in.dequeue(clockEdge());
+  }
+
+  action(prb_popRegionBufQueue, "prb", desc="pop region buffer request queue") {
+    regBuf_in.dequeue(clockEdge());
+  }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pm_popMemQueue, "pm", desc="pop mem queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
+ L3TriggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(ww_stallAndWaitRegRequestQueue, "ww", desc="recycle region dir request queue") {
+ stall_and_wait(regDir_in, address);
+ }
+
+ action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
+ stall_and_wait(requestNetwork_in, address);
+ }
+
+ action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
+ wakeUpBuffers(address);
+ }
+
+ action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
+ wakeUpAllBuffers();
+ }
+
+  action(z_stall, "z", desc="stall") {
+ }
+
+ // TRANSITIONS
+
+ // transitions from U
+
+ transition({BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Inv, Downgrade}) {
+ ww_stallAndWaitRegRequestQueue;
+ }
+
+ transition(U, Inv, BI){L3TagArrayRead} {
+ tr_allocateTBE;
+ dcr_probeInvCoreData; // only need to invalidate sharers
+ ai_ackInvalidate;
+ prb_popRegionBufQueue;
+ }
+
+ transition(U, Downgrade, BI){L3TagArrayRead} {
+ tr_allocateTBE;
+ ddr_probeDownCoreData; // only need to invalidate sharers
+ ai_ackInvalidate;
+ prb_popRegionBufQueue;
+ }
+
+  // The next two transitions are needed when an invalidation is still
+  // waiting for its acks from the core, but a request makes it through
+  // the region directory before those acks arrive. They wouldn't be needed
+  // if we waited to ack the region dir until the directory got all the acks.
+ transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, WriteThrough, Atomic}) {
+ ww_stallAndWaitRegRequestQueue;
+ }
+
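+  // P-suffixed events arrive directly on the core request network (the
+  // block's region is already private), so they stall on the block address
+  // via st_stallAndWaitRequest rather than on the region-dir queue.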
+ transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {RdBlkSP, RdBlkMP, RdBlkP}) {
+ st_stallAndWaitRequest;
+ }
+
+ transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {WriteThroughP,AtomicP}) {
+ st_stallAndWaitRequest;
+ }
+
+ transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} {
+ t_allocateTBE;
+ l_queueMemRdReq;
+ sa_setAcks;
+ o_checkForCompletion;
+ ra_ackRegionDir;
+ prd_popRegionQueue;
+ }
+
+ transition(U, WriteThrough, BM_PM){L3TagArrayRead} {
+ t_allocateTBE;
+ w_sendResponseWBAck;
+ l_queueMemRdReq;
+ sa_setAcks;
+ o_checkForCompletion;
+ ra_ackRegionDir;
+ prd_popRegionQueue;
+ }
+
+ transition(U, {RdBlkM,Atomic}, BM_PM){L3TagArrayRead} {
+ t_allocateTBE;
+ l_queueMemRdReq;
+ sa_setAcks;
+ o_checkForCompletion;
+ ra_ackRegionDir;
+ prd_popRegionQueue;
+ }
+
+ transition(U, RdBlk, B_PM){L3TagArrayRead} {
+ t_allocateTBE;
+ l_queueMemRdReq;
+ sa_setAcks;
+ o_checkForCompletion;
+ ra_ackRegionDir;
+ prd_popRegionQueue;
+ }
+
+ transition(U, {RdBlkSP}, BS_M) {L3TagArrayRead} {
+ tp_allocateTBEP;
+ lrp_queueMemRdReqP;
+ p_popRequestQueue;
+ }
+
+ transition(U, WriteThroughP, BM_M) {L3TagArrayRead} {
+ tp_allocateTBEP;
+ wp_sendResponseWBAckP;
+ lrp_queueMemRdReqP;
+ p_popRequestQueue;
+ }
+
+ transition(U, {RdBlkMP,AtomicP}, BM_M) {L3TagArrayRead} {
+ tp_allocateTBEP;
+ lrp_queueMemRdReqP;
+ p_popRequestQueue;
+ }
+
+ transition(U, RdBlkP, B_M) {L3TagArrayRead} {
+ tp_allocateTBEP;
+ lrp_queueMemRdReqP;
+ p_popRequestQueue;
+ }
+
+ transition(U, VicDirtyP, BL) {L3TagArrayRead} {
+ tp_allocateTBEP;
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(U, VicCleanP, BL) {L3TagArrayRead} {
+ tp_allocateTBEP;
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm, RdBlkSP, BM_Pm_B) {L3DataArrayWrite} {
+ sb_sendResponseSBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BS_Pm, RdBlkSP, BS_Pm_B) {L3DataArrayWrite} {
+ sb_sendResponseSBypass;
+ p_popRequestQueue;
+ }
+
+ transition(B_Pm, RdBlkSP, B_Pm_B) {L3DataArrayWrite} {
+ sb_sendResponseSBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BP, RdBlkSP, BP_B) {L3DataArrayWrite} {
+ sb_sendResponseSBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm, RdBlkMP, BM_Pm_B) {L3DataArrayWrite} {
+ mb_sendResponseMBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BS_Pm, RdBlkMP, BS_Pm_B) {L3DataArrayWrite} {
+ mb_sendResponseMBypass;
+ p_popRequestQueue;
+ }
+
+ transition(B_Pm, RdBlkMP, B_Pm_B) {L3DataArrayWrite} {
+ mb_sendResponseMBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BP, RdBlkMP, BP_B) {L3DataArrayWrite} {
+ mb_sendResponseMBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm, {WriteThroughP,AtomicP}, BM_Pm_B) {L3DataArrayWrite} {
+ wdp_writeBackDataPrivate;
+ mbwt_sendResponseWriteThroughBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BS_Pm, {WriteThroughP,AtomicP}, BS_Pm_B) {L3DataArrayWrite} {
+ wdp_writeBackDataPrivate;
+ mbwt_sendResponseWriteThroughBypass;
+ p_popRequestQueue;
+ }
+
+ transition(B_Pm, {WriteThroughP,AtomicP}, B_Pm_B) {L3DataArrayWrite} {
+ wdp_writeBackDataPrivate;
+ mbwt_sendResponseWriteThroughBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BP, {WriteThroughP,AtomicP}, BP_B) {L3DataArrayWrite} {
+ wdp_writeBackDataPrivate;
+ mbwt_sendResponseWriteThroughBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm, RdBlkP, BM_Pm_B) {L3DataArrayWrite} {
+ esb_sendResponseESBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BS_Pm, RdBlkP, BS_Pm_B) {L3DataArrayWrite} {
+ esb_sendResponseESBypass;
+ p_popRequestQueue;
+ }
+
+ transition(B_Pm, RdBlkP, B_Pm_B) {L3DataArrayWrite}{
+ esb_sendResponseESBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BP, RdBlkP, BP_B) {L3DataArrayWrite}{
+ esb_sendResponseESBypass;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm_B, CoreUnblock, BM_Pm) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition(BS_Pm_B, CoreUnblock, BS_Pm) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition(B_Pm_B, CoreUnblock, B_Pm) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition(BP_B, CoreUnblock, BP) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition(BM_Pm_B, UnblockWriteThrough, BM_Pm) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(BS_Pm_B, UnblockWriteThrough, BS_Pm) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(B_Pm_B, UnblockWriteThrough, B_Pm) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(BP_B, UnblockWriteThrough, BP) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(BM_Pm, VicDirtyP, BM_Pm_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BS_Pm, VicDirtyP, BS_Pm_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(B_Pm, VicDirtyP, B_Pm_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BP, VicDirtyP, BP_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm, VicCleanP, BM_Pm_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BS_Pm, VicCleanP, BS_Pm_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(B_Pm, VicCleanP, B_Pm_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BP, VicCleanP, BP_BL) {
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition(BM_Pm_BL, CPUData, BM_Pm) {
+ yc_writeCPUDataToTBE;
+ d_writeDataToMemory;
+ wa_wakeUpDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(BS_Pm_BL, CPUData, BS_Pm) {
+ yc_writeCPUDataToTBE;
+ d_writeDataToMemory;
+ wa_wakeUpDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(B_Pm_BL, CPUData, B_Pm) {
+ yc_writeCPUDataToTBE;
+ d_writeDataToMemory;
+ wa_wakeUpDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(BP_BL, CPUData, BP) {
+ yc_writeCPUDataToTBE;
+ d_writeDataToMemory;
+ wa_wakeUpDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({BR, BW, BL}, {VicDirtyP, VicCleanP}) {
+ st_stallAndWaitRequest;
+ }
+
+ transition({BR, BW, BL}, {VicDirty, VicClean}) {
+ ww_stallAndWaitRegRequestQueue;
+ }
+
+ transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
+ dt_deallocateTBE;
+ d_writeDataToMemory;
+ al_allocateL3Block;
+ wa_wakeUpDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(BL, StaleWB, U) {L3TagArrayWrite} {
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({BI, B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {VicDirty, VicClean}) {
+ ww_stallAndWaitRegRequestQueue;
+ }
+
+ transition({BI, B, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {VicDirtyP, VicCleanP}) {
+ st_stallAndWaitRequest;
+ }
+
+ transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, WBAck) {
+ pm_popMemQueue;
+ }
+
+ transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, StaleVicDirtyP) {
+ rvp_removeVicDirtyIgnore;
+ wp_sendResponseWBAckP;
+ p_popRequestQueue;
+ }
+
+ transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, StaleVicDirty) {
+ rv_removeVicDirtyIgnore;
+ w_sendResponseWBAck;
+ prd_popRegionQueue;
+ }
+
+ transition(U, VicDirty, BL) {L3TagArrayRead} {
+ t_allocateTBE;
+ ra_ackRegionDir;
+ w_sendResponseWBAck;
+ prd_popRegionQueue;
+ }
+
+ transition(U, VicClean, BL) {L3TagArrayRead} {
+ t_allocateTBE;
+ ra_ackRegionDir;
+ w_sendResponseWBAck;
+ prd_popRegionQueue;
+ }
+
+ transition({B, BR}, CoreUnblock, U) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition({B, BR}, UnblockWriteThrough, U) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(BS_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BS_PM, MemData, BS_Pm) {} {
+ mt_writeMemDataToTBE;
+ wa_wakeUpDependents;
+ pm_popMemQueue;
+ }
+
+ transition(BM_PM, MemData, BM_Pm){} {
+ mt_writeMemDataToTBE;
+ wa_wakeUpDependents;
+ pm_popMemQueue;
+ }
+
+ transition(B_PM, MemData, B_Pm){} {
+ mt_writeMemDataToTBE;
+ wa_wakeUpDependents;
+ pm_popMemQueue;
+ }
+
+ transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(BM_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(B_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(BS_PM, L3Hit, BS_Pm) {
+ wa_wakeUpDependents;
+ ptl_popTriggerQueue;
+ }
+
+ transition(BM_PM, L3Hit, BM_Pm) {
+ wa_wakeUpDependents;
+ ptl_popTriggerQueue;
+ }
+
+ transition(B_PM, L3Hit, B_Pm) {
+ wa_wakeUpDependents;
+ ptl_popTriggerQueue;
+ }
+
+ transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BP, BI}, CPUPrbResp) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ pr_popResponseQueue;
+ }
+
+ transition({B, B_M, BS_M, BM_M}, {CPUPrbResp, LastCPUPrbResp}) {
+ z_stall;
+ }
+
+ transition({BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {CPUPrbResp, LastCPUPrbResp}) {
+ // recycling because PrbResponse and data come on the same network
+ yy_recycleResponseQueue;
+ }
+
+ transition(U, {CPUPrbResp, LastCPUPrbResp}) {L3TagArrayRead, L3DataArrayWrite} {
+ aic_ackInvalidate;
+ wdt_writeBackDataInvNoTBE;
+ ali_allocateL3BlockNoTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(BL, {CPUPrbResp, LastCPUPrbResp}) {} {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ wdi_writeBackDataInv;
+ ali_allocateL3Block;
+ pr_popResponseQueue;
+ }
+
+ transition(BS_PM, LastCPUPrbResp, BS_M) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ pr_popResponseQueue;
+ }
+
+ transition(BS_PM, ProbeAcksComplete, BS_M) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(BM_PM, LastCPUPrbResp, BM_M) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ pr_popResponseQueue;
+ }
+
+ transition(BM_PM, ProbeAcksComplete, BM_M) {} {
+ pt_popTriggerQueue;
+ }
+
+ transition(B_PM, LastCPUPrbResp, B_M) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ pr_popResponseQueue;
+ }
+
+ transition(B_PM, ProbeAcksComplete, B_M){} {
+ pt_popTriggerQueue;
+ }
+
+ transition(BS_Pm, LastCPUPrbResp, B) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(BM_Pm, LastCPUPrbResp, B) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(B_Pm, LastCPUPrbResp, B) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(BP, LastCPUPrbResp, B) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ c_sendResponseCtoD;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+  transition(BP, ProbeAcksComplete, B){L3TagArrayWrite, L3DataArrayWrite} {
+ c_sendResponseCtoD;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(BI, LastCPUPrbResp, B) {
+ aic_ackInvalidate;
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ ont_checkForCompletionNoTrigger;
+ wa_wakeUpDependents;
+ wdi_writeBackDataInv;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(BI, ProbeAcksComplete, U) {L3TagArrayWrite, L3DataArrayWrite}{
+ wa_wakeUpDependents;
+ wdi_writeBackDataInv;
+ ali_allocateL3Block;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+enumeration(CoherenceRequestType, desc="Coherence Request Types") {
+ // CPU Request Types ONLY
+ RdBlk, desc="Read Blk";
+ RdBlkM, desc="Read Blk Modified";
+ RdBlkS, desc="Read Blk Shared";
+ VicClean, desc="L2 clean eviction";
+ VicDirty, desc="L2 dirty eviction";
+
+ WrCancel, desc="want to cancel WB to Memory"; // should this be here?
+
+ WBApproval, desc="WB Approval";
+
+ // Messages between Dir and R-Dir
+  ForceInv, desc="Send invalidate to the block";
+ ForceDowngrade, desc="Send downgrade to the block";
+ Unblock, desc="Used to let the dir know a message has been sunk";
+
+ // Messages between R-Dir and R-Buffer
+ PrivateNotify, desc="Let region buffer know it has private access";
+ SharedNotify, desc="Let region buffer know it has shared access";
+ WbNotify, desc="Let region buffer know it saw its wb request";
+ Downgrade, desc="Force the region buffer to downgrade to shared";
+ // Response to R-Dir (probably should be on a different network, but
+ // I need it to be ordered with respect to requests)
+  InvAck, desc="Let the R-Dir know when the inv has occurred";
+
+ PrivateRequest, desc="R-buf wants the region in private";
+  UpgradeRequest, desc="R-buf wants to upgrade the region from shared to private";
+ SharedRequest, desc="R-buf wants the region in shared (could respond with private)";
+ CleanWbRequest, desc="R-buf wants to deallocate clean region";
+
+ NA, desc="So we don't get segfaults";
+}
+
+enumeration(ProbeRequestType, desc="Probe Request Types") {
+ PrbDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
+ PrbInv, desc="Probe to Invalidate";
+
+ // For regions
+ PrbRepl, desc="Force the cache to do a replacement";
+ PrbRegDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
+}
+
+
+enumeration(CoherenceResponseType, desc="Coherence Response Types") {
+ NBSysResp, desc="Northbridge response to CPU Rd request";
+ NBSysWBAck, desc="Northbridge response ok to WB";
+ TDSysResp, desc="TCCdirectory response to CPU Rd request";
+ TDSysWBAck, desc="TCCdirectory response ok to WB";
+ TDSysWBNack, desc="TCCdirectory response ok to drop";
+ CPUPrbResp, desc="CPU Probe Response";
+ CPUData, desc="CPU Data";
+  StaleNotif, desc="Notification of stale WBAck, no data to write back";
+ CPUCancelWB, desc="want to cancel WB to Memory";
+ MemData, desc="Data from Memory";
+
+ // for regions
+ PrivateAck, desc="Ack that r-buf received private notify";
+ RegionWbAck, desc="Writeback Ack that r-buf completed deallocation";
+ DirReadyAck, desc="Directory (mem ctrl)<->region dir handshake";
+}
+
+enumeration(CoherenceState, default="CoherenceState_NA", desc="Coherence State") {
+ Modified, desc="Modified";
+ Owned, desc="Owned state";
+ Exclusive, desc="Exclusive";
+ Shared, desc="Shared";
+ NA, desc="NA";
+}
+
+structure(CPURequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ Addr DemandAddress, desc="Physical block address for this request";
+ CoherenceRequestType Type, desc="Type of request";
+ DataBlock DataBlk, desc="data for the cache line"; // only for WB
+ bool Dirty, desc="whether WB data is dirty"; // only for WB
+  MachineID Requestor, desc="Node who initiated the request";
+  MachineID WTRequestor, desc="Node who initiated the write through";
+ NetDest Destination, desc="Multicast destination mask";
+ bool Shared, desc="For CPU_WrVicBlk, vic is O not M. For CPU_ClVicBlk, vic is S";
+ MessageSizeType MessageSize, desc="size category of the message";
+  Cycles InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
+ Cycles ForwardRequestTime, default="0", desc="time the dir forwarded the request";
+ Cycles ProbeRequestStartTime, default="0", desc="the time the dir started the probe request";
+ bool DemandRequest, default="false", desc="For profiling purposes";
+
+ NetDest Sharers, desc="Caches that may have a valid copy of the data";
+ bool ForceShared, desc="R-dir knows it is shared, pass on so it sends an S copy, not E";
+ bool Private, default="false", desc="Requestor already has private permissions, no need for dir check";
+  bool CtoDSinked, default="false", desc="True if a previously sent CtoD must already have been sunk";
+
+ bool NoAckNeeded, default="false", desc="True if region buffer doesn't need to ack";
+ int Acks, default="0", desc="Acks that the dir (mem ctrl) should expect to receive";
+ CoherenceRequestType OriginalType, default="CoherenceRequestType_NA", desc="Type of request from core fwded through region buffer";
+
+ bool functionalRead(Packet *pkt) {
+    // Only dirty-victim (PUTX-like) messages contain the data block
+ if (Type == CoherenceRequestType:VicDirty) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+structure(NBProbeRequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ ProbeRequestType Type, desc="probe signal";
+ bool ReturnData, desc="Indicates CPU should return data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MessageSizeType MessageSize, desc="size category of the message";
+ bool DemandRequest, default="false", desc="demand request, requesting 3-hop transfer";
+ Addr DemandAddress, desc="Demand block address for a region request";
+ MachineID Requestor, desc="Requestor id for 3-hop requests";
+  bool NoAckNeeded, default="false", desc="For short-circuiting acks";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return false;
+ }
+
+}
+
+structure(TDProbeRequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ ProbeRequestType Type, desc="TD_PrbNxtState signal";
+ bool ReturnData, desc="Indicates CPU should return data";
+ bool localCtoD, desc="Indicates CtoD is within the GPU hierarchy (aka TCC subtree)";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MessageSizeType MessageSize, desc="size category of the message";
+ MachineID Sender, desc="Node who sent the data";
+ bool currentOwner, default="false", desc="Is the sender the current owner";
+ bool DoneAck, default="false", desc="Is this a done ack?";
+ bool Dirty, default="false", desc="Was block dirty when evicted";
+ bool wasValid, default="false", desc="Was block valid when evicted";
+ bool valid, default="false", desc="Is block valid";
+  bool validToInvalid, default="false", desc="Did the block go from valid to invalid on this eviction";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return false;
+ }
+}
+
+// Response Messages seemed to be easily munged into one type
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="NB Sys Resp or CPU Response to Probe";
+ MachineID Sender, desc="Node who sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ // Begin Used Only By CPU Response
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Hit, desc="probe hit valid line";
+ bool Shared, desc="True if S, or if NB Probe ReturnData==1 && O";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+  bool Ntsl, desc="Indicates the probed line will be invalid after the probe";
+ bool UntransferredOwner, desc="pending confirmation of ownership change";
+ // End Used Only By CPU Response
+
+ // Begin NB Response Only
+ CoherenceState State, default=CoherenceState_NA, desc="What returned data from NB should be in";
+ bool CtoD, desc="was the originator a CtoD?";
+ // End NB Response Only
+
+ bool NbReqShared, desc="modification of Shared field from initial request, e.g. hit by shared probe";
+
+ MessageSizeType MessageSize, desc="size category of the message";
+  Cycles InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
+ Cycles ForwardRequestTime, default="0", desc="time the dir forwarded the request";
+ Cycles ProbeRequestStartTime, default="0", desc="the time the dir started the probe request";
+ bool DemandRequest, default="false", desc="For profiling purposes";
+
+ bool L3Hit, default="false", desc="Did memory or L3 supply the data?";
+ MachineID OriginalResponder, desc="Mach which wrote the data to the L3";
+
+ bool NotCached, default="false", desc="True when the Region buffer has already evicted the line";
+
+  bool NoAckNeeded, default="false", desc="For short-circuiting acks";
+ bool isValid, default="false", desc="Is acked block valid";
+
+ bool functionalRead(Packet *pkt) {
+    // Only data-carrying messages (CPUData, MemData) contain the data block
+ if (Type == CoherenceResponseType:CPUData ||
+ Type == CoherenceResponseType:MemData) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+structure(UnblockMsg, desc="...", interface="Message") {
+  Addr addr, desc="Physical address for this request";
+  NetDest Destination, desc="Destination (always directory)";
+  MessageSizeType MessageSize, desc="size category of the message";
+  // The fields below are referenced by the directory and region buffer
+  // machines, which reuse unblock messages as done acks.
+  bool DoneAck, default="false", desc="Is this a done ack?";
+  bool wasValid, default="false", desc="Was block valid when evicted";
+  bool validToInvalid, default="false", desc="Did the block go from valid to invalid";
+}
+
+enumeration(TriggerType, desc="Trigger Type") {
+ L2_to_L1, desc="L2 to L1 fill";
+ AcksComplete, desc="NB received all needed Acks";
+
+ // For regions
+ InvNext, desc="Invalidate the next block";
+ PrivateAck, desc="Loopback ack for machines with no Region Buffer";
+ AllOutstanding, desc="All outstanding requests have finished";
+ L3Hit, desc="L3 hit in dir";
+
+ // For region directory once the directory is blocked
+ InvRegion, desc="Invalidate region";
+ DowngradeRegion, desc="downgrade region";
+}
+
+enumeration(CacheId, desc="Which Cache in the Core") {
+ L1I, desc="L1 I-cache";
+ L1D0, desc="L1 D-cache cluster 0";
+ L1D1, desc="L1 D-cache cluster 1";
+ NA, desc="Default";
+}
+
+structure(TriggerMsg, desc="...", interface="Message") {
+ Addr addr, desc="Address";
+ TriggerType Type, desc="Type of trigger";
+ CacheId Dest, default="CacheId_NA", desc="Cache to invalidate";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return false;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Jason Power
+ */
+
+machine(MachineType:RegionBuffer, "Region Buffer for AMD_Base-like protocol")
+: CacheMemory *cacheMemory; // Stores only region addresses; its block size must equal the region size implied by blocksPerRegion below
+ bool isOnCPU;
+  int blocksPerRegion := 64; // 64 blocks x 64B = 4kB regions
+ Cycles toDirLatency := 5; // Latency to fwd requests to directory
+ Cycles toRegionDirLatency := 5; // Latency for requests and acks to directory
+ Cycles nextEvictLatency := 1; // latency added between each block while evicting region
+ bool noTCCdir := "False";
+ int TCC_select_num_bits := 1;
+
+ // From the Cores
+ MessageBuffer * requestFromCore, network="From", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseFromCore, network="From", virtual_network="2", vnet_type="response";
+
+ // Requests to the cores or directory
+ MessageBuffer * requestToNetwork, network="To", virtual_network="0", vnet_type="request";
+
+ // From Region-Dir
+ MessageBuffer * notifyFromRegionDir, network="From", virtual_network="7", vnet_type="request";
+ MessageBuffer * probeFromRegionDir, network="From", virtual_network="8", vnet_type="request";
+
+ // From the directory
+ MessageBuffer * unblockFromDir, network="From", virtual_network="4", vnet_type="unblock";
+
+ // To the region-Dir
+ MessageBuffer * responseToRegDir, network="To", virtual_network="2", vnet_type="response";
+
+ MessageBuffer * triggerQueue;
+{
+
+ // States
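+  // Naming convention (per the descs below): X_Y is a transient state
+  // moving from X toward Y; an _O suffix waits for outstanding requests
+  // to drain, and a _W suffix waits for a writeback ack.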
+ state_declaration(State, desc="Region states", default="RegionBuffer_State_NP") {
+ NP, AccessPermission:Invalid, desc="Not present in region directory";
+ P, AccessPermission:Invalid, desc="Region is private to the cache";
+ S, AccessPermission:Invalid, desc="Region is possibly shared with others";
+
+ NP_PS, AccessPermission:Invalid, desc="Intermediate state waiting for notify from r-dir";
+ S_P, AccessPermission:Invalid, desc="Intermediate state while upgrading region";
+
+ P_NP, AccessPermission:Invalid, desc="Intermediate state while evicting all lines in region";
+ P_S, AccessPermission:Invalid, desc="Intermediate state while downgrading all lines in region";
+
+    S_NP_PS, AccessPermission:Invalid, desc="Got an inv in S_P; waiting for all inv acks, then going to NP_PS since the write is already out there";
+ P_NP_NP, AccessPermission:Invalid, desc="Evicting region on repl, then got an inv. Need to re-evict";
+
+ P_NP_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
+ P_S_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
+ S_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
+ S_NP_PS_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
+
+ SS_P, AccessPermission:Invalid, desc="Waiting for CPU write that we know is there";
+
+ P_NP_W, AccessPermission:Invalid, desc="Waiting for writeback ack";
+
+ NP_W, AccessPermission:Invalid, desc="Got a done ack before request, waiting for that victim";
+ }
+
+ enumeration(Event, desc="Region directory events") {
+    CPURead, desc="Read access from CPU core";
+    CPUWrite, desc="Write access from CPU core";
+ CPUWriteback, desc="Writeback request from CPU core";
+
+ ReplRegion, desc="Start a replace on a region";
+
+ PrivateNotify, desc="Update entry to private state";
+ SharedNotify, desc="Update entry to shared state";
+ WbNotify, desc="Writeback notification received";
+ InvRegion, desc="Start invalidating a region";
+    DowngradeRegion, desc="Start downgrading a region";
+
+ InvAck, desc="Ack from core";
+
+ DoneAck, desc="Ack from core that request has finished";
+ AllOutstanding, desc="All outstanding requests have now finished";
+
+ Evict, desc="Loopback to evict each block";
+    LastAck_PrbResp, desc="Done evicting all the blocks, got the last ack from core, now respond to region dir";
+    LastAck_CleanWb, desc="Done evicting all the blocks, got the last ack from core, now start clean writeback (note the dir has already been updated)";
+
+ StallAccess, desc="Wait for the done ack on the address before proceeding";
+ StallDoneAck, desc="Wait for the access on the address before proceeding";
+
+ StaleRequest, desc="Got a stale victim from the cache, fwd it without incrementing outstanding";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+    TagArrayRead, desc="Read the tag array";
+    TagArrayWrite, desc="Write the tag array";
+ }
+
+ structure(BoolVec, external="yes") {
+ bool at(int);
+ void resize(int);
+ void clear();
+ int size();
+ }
+
+ structure(Entry, desc="Region entry", interface="AbstractCacheEntry") {
+ Addr addr, desc="Base address of this region";
+ State RegionState, desc="Region state";
+ DataBlock DataBlk, desc="Data for the block (always empty in region buffer)";
+ BoolVec ValidBlocks, desc="A vector to keep track of valid blocks";
+ int NumValidBlocks, desc="Number of trues in ValidBlocks to avoid iterating";
+ BoolVec UsedBlocks, desc="A vector to keep track of blocks ever valid";
+ bool dirty, desc="Dirty as best known by the region buffer";
+ // This is needed so we don't ack an invalidate until all requests are ordered
+ int NumOutstandingReqs, desc="Total outstanding private/shared requests";
+ BoolVec OutstandingReqs, desc="Blocks that have outstanding private/shared requests";
+ bool MustDowngrade, desc="Set when we got a downgrade before the shd or pvt permissions";
+ Cycles ProbeRequestTime, default="Cycles(0)", desc="Time region dir started the probe";
+ Cycles InitialRequestTime, default="Cycles(0)", desc="Time message was sent to region dir";
+ bool MsgSentToDir, desc="True if the current request required a message to the dir";
+ bool clearOnDone, default="false", desc="clear valid bit when request completes";
+ Addr clearOnDoneAddr, desc="clear valid bit when request completes";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ //int NumValidBlocks, desc="Number of blocks valid so we don't have to count a BoolVec";
+ BoolVec ValidBlocks, desc="A vector to keep track of valid blocks";
+ bool AllAcksReceived, desc="Got all necessary acks from dir";
+ bool DoneEvicting, desc="Done iterating through blocks checking for valids";
+    BoolVec AcksReceived, desc="Received acks for these blocks";
+ bool SendAck, desc="If true, send an ack to the r-dir at end of inv";
+ ProbeRequestType MsgType, desc="Type of message to send while 'evicting' ";
+ int NumOutstandingReqs, desc="Total outstanding private/shared requests";
+ BoolVec OutstandingReqs, desc="Blocks that have outstanding private/shared requests";
+ MachineID Requestor, desc="Requestor for three hop transactions";
+ bool DemandRequest, default="false", desc="Associated with a demand request";
+ Addr DemandAddress, desc="Address for the demand request";
+ bool DoneAckReceived, default="false", desc="True if the done ack arrived before the message";
+ Addr DoneAckAddr, desc="Address of the done ack received early";
+ int OutstandingThreshold, desc="Number of outstanding requests to trigger AllOutstanding on";
+
+ ProbeRequestType NewMsgType, desc="Type of message to send while 'evicting' ";
+ MachineID NewRequestor, desc="Requestor for three hop transactions";
+ bool NewDemandRequest, default="false", desc="Associated with a demand request";
+ Addr NewDemandAddress, desc="Address for the demand request";
+ bool dirty, desc="dirty";
+ bool AllOutstandingTriggered, default="false", desc="bit for only one all outstanding";
+ int OutstandingAcks, default="0", desc="number of acks to wait for";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ // Stores only region addresses
+ TBETable TBEs, template="<RegionBuffer_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ int blockBits, default="RubySystem::getBlockSizeBits()";
+ int blockBytes, default="RubySystem::getBlockSizeBytes()";
+ int regionBits, default="log2(m_blocksPerRegion)";
+
+ // Functions
+
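+  // Address layout sketch (assuming the default 64B blocks, so blockBits=6,
+  // and blocksPerRegion=64, so regionBits=6): getRegionBase() masks the low
+  // 12 bits, yielding a 4kB-aligned region base, while getRegionOffset()
+  // extracts bits [6:11], the block's index within its region.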
+ int getRegionOffset(Addr addr) {
+ if (blocksPerRegion > 1) {
+ Addr offset := bitSelect(addr, blockBits, regionBits+blockBits-1);
+ int ret := addressToInt(offset);
+ assert(ret < blocksPerRegion);
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+
+ Addr getRegionBase(Addr addr) {
+ return maskLowOrderBits(addr, blockBits+regionBits);
+ }
+
+ Addr getNextBlock(Addr addr) {
+ Addr a := addr;
+ return makeNextStrideAddress(a, 1);
+ }
+
+ MachineID getPeer(MachineID mach, Addr address) {
+ if (isOnCPU) {
+ return createMachineID(MachineType:CorePair, intToID(0));
+ } else if (noTCCdir) {
+ return mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ } else {
+ return createMachineID(MachineType:TCCdir, intToID(0));
+ }
+ }
+
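+  // A block is "outstanding" if a private/shared request for it is still
+  // in flight; the core-request port stalls further accesses to such
+  // blocks, and the unblock port uses this to match DoneAcks to requests.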
+ bool isOutstanding(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe) && tbe.OutstandingReqs.size() > 0) {
+ DPRINTF(RubySlicc, " outstanding tbe reqs %s %s %d %d\n",
+ tbe.OutstandingReqs, addr, getRegionOffset(addr),
+ tbe.OutstandingReqs.at(getRegionOffset(addr)));
+ return tbe.OutstandingReqs.at(getRegionOffset(addr));
+ } else if (is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, " outstanding cache reqs %s %s %d %d\n",
+ cache_entry.OutstandingReqs, addr, getRegionOffset(addr),
+ cache_entry.OutstandingReqs.at(getRegionOffset(addr)));
+ return cache_entry.OutstandingReqs.at(getRegionOffset(addr));
+ } else {
+ return false;
+ }
+ }
+
+ bool isOnGPU() {
+ if (isOnCPU) {
+ return false;
+ }
+ return true;
+ }
+
+ bool isRead(CoherenceRequestType type) {
+ return (type == CoherenceRequestType:RdBlk || type == CoherenceRequestType:RdBlkS ||
+ type == CoherenceRequestType:VicClean);
+ }
+
+ bool presentOrAvail(Addr addr) {
+ return cacheMemory.isTagPresent(getRegionBase(addr)) || cacheMemory.cacheAvail(getRegionBase(addr));
+ }
+
+ // Returns a region entry!
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", cacheMemory.lookup(getRegionBase(addr)));
+ }
+
+ TBE getTBE(Addr addr), return_by_pointer="yes" {
+ return TBEs.lookup(getRegionBase(addr));
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ return getCacheEntry(getRegionBase(addr)).DataBlk;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.RegionState;
+ }
+ return State:NP;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ if (is_valid(cache_entry)) {
+ cache_entry.RegionState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := getTBE(addr);
+ if(is_valid(tbe)) {
+ return RegionBuffer_State_to_permission(tbe.TBEState);
+ }
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return RegionBuffer_State_to_permission(cache_entry.RegionState);
+ }
+ return AccessPermission:NotPresent;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ functionalMemoryRead(pkt);
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ if (functionalMemoryWrite(pkt)) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(RegionBuffer_State_to_permission(state));
+ }
+ }
+
+ void recordRequestType(RequestType stat, Addr addr) {
+ if (stat == RequestType:TagArrayRead) {
+ cacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (stat == RequestType:TagArrayWrite) {
+ cacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:TagArrayRead) {
+ return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+  // Overloaded outgoing request network for both probes to cores and requests
+ // to the directory.
+ // Fix Me: These forwarded requests need to be on a separate virtual channel
+ // to avoid deadlock!
+ out_port(requestNetwork_out, CPURequestMsg, requestToNetwork);
+ out_port(probeNetwork_out, NBProbeRequestMsg, requestToNetwork);
+
+ out_port(responseNetwork_out, ResponseMsg, responseToRegDir);
+
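+  // In-ports carry explicit ranks; assuming SLICC services higher-rank
+  // ports first, triggers and unblocks are drained before probes and
+  // notifies, with core requests (rank 0) served last.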
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=4) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := getTBE(in_msg.addr);
+ DPRINTF(RubySlicc, "trigger msg: %s (%s)\n", in_msg, getRegionBase(in_msg.addr));
+ assert(is_valid(tbe));
+ if (in_msg.Type == TriggerType:AcksComplete) {
+ if (tbe.SendAck) {
+ trigger(Event:LastAck_PrbResp, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:LastAck_CleanWb, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == TriggerType:AllOutstanding) {
+ trigger(Event:AllOutstanding, in_msg.addr, cache_entry, tbe);
+ } else {
+ assert(in_msg.Type == TriggerType:InvNext);
+ trigger(Event:Evict, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(unblockNetwork_in, UnblockMsg, unblockFromDir, rank=3) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, UnblockMsg) {
+ TBE tbe := getTBE(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.DoneAck) {
+ if (isOutstanding(tbe, cache_entry, in_msg.addr)) {
+ trigger(Event:DoneAck, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:StallDoneAck, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ assert(is_valid(tbe));
+ trigger(Event:InvAck, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ in_port(probeNetwork_in, NBProbeRequestMsg, probeFromRegionDir, rank=2) {
+ if (probeNetwork_in.isReady(clockEdge())) {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ TBE tbe := getTBE(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ assert(getRegionBase(in_msg.addr) == in_msg.addr);
+ if (in_msg.Type == ProbeRequestType:PrbInv) {
+ trigger(Event:InvRegion, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
+ trigger(Event:DowngradeRegion, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unknown probe message\n");
+ }
+ }
+ }
+ }
+
+ in_port(notifyNetwork_in, CPURequestMsg, notifyFromRegionDir, rank=1) {
+ if (notifyNetwork_in.isReady(clockEdge())) {
+ peek(notifyNetwork_in, CPURequestMsg) {
+ TBE tbe := getTBE(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ //Fix Me...add back in: assert(is_valid(cache_entry));
+ if (in_msg.Type == CoherenceRequestType:WbNotify) {
+ trigger(Event:WbNotify, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:SharedNotify) {
+ trigger(Event:SharedNotify, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:PrivateNotify) {
+ trigger(Event:PrivateNotify, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unknown notify message\n");
+ }
+ }
+ }
+ }
+
+ // In from cores
+ // NOTE: We get the cache / TBE entry based on the region address,
+ // but pass the block address to the actions
+ in_port(requestNetwork_in, CPURequestMsg, requestFromCore, rank=0) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBE tbe := getTBE(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (is_valid(tbe) && tbe.DoneAckReceived && tbe.DoneAckAddr == in_msg.addr) {
+ DPRINTF(RubySlicc, "Stale/Stall request %s\n", in_msg.Type);
+          if (in_msg.Type == CoherenceRequestType:VicDirty || in_msg.Type == CoherenceRequestType:VicClean) {
+ trigger(Event:StaleRequest, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:StallAccess, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (isOutstanding(tbe, cache_entry, in_msg.addr)) {
+ DPRINTF(RubySlicc, "Stall outstanding request %s\n", in_msg.Type);
+ trigger(Event:StallAccess, in_msg.addr, cache_entry, tbe);
+ } else {
+ if (presentOrAvail(in_msg.addr)) {
+ if (in_msg.Type == CoherenceRequestType:RdBlkM ) {
+ trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WriteThrough ) {
+ trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:Atomic ) {
+ trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
+ } else {
+ if (in_msg.Type == CoherenceRequestType:VicDirty ||
+ in_msg.Type == CoherenceRequestType:VicClean) {
+ trigger(Event:CPUWriteback, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:CPURead, in_msg.addr, cache_entry, tbe);
+ }
+ }
+ } else {
+ Addr victim := cacheMemory.cacheProbe(getRegionBase(in_msg.addr));
+ TBE victim_tbe := getTBE(victim);
+ Entry victim_entry := getCacheEntry(victim);
+ DPRINTF(RubySlicc, "Replacing region %s for %s(%s)\n", victim, in_msg.addr, getRegionBase(in_msg.addr));
+ trigger(Event:ReplRegion, victim, victim_entry, victim_tbe);
+ }
+ }
+ }
+ }
+ }
+
+ // Actions
+ action(f_fwdReqToDir, "f", desc="Forward CPU request to directory") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := in_msg.Type;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Private := true;
+ out_msg.InitialRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := curCycle();
+ if (getState(tbe, cache_entry, address) == State:S) {
+ out_msg.ForceShared := true;
+ }
+ DPRINTF(RubySlicc, "Fwd: %s\n", out_msg);
+ //assert(getState(tbe, cache_entry, address) == State:P || getState(tbe, cache_entry, address) == State:S);
+ if (getState(tbe, cache_entry, address) == State:NP_W) {
+ APPEND_TRANSITION_COMMENT(" fwding stale request: ");
+ APPEND_TRANSITION_COMMENT(out_msg.Type);
+ }
+ }
+ }
+ }
+
+ action(u_updateRegionEntry, "u", desc="Update the entry for profiling") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (is_valid(cache_entry)) {
+ if (in_msg.CtoDSinked == false) {
+ APPEND_TRANSITION_COMMENT(" incr outstanding ");
+ cache_entry.NumOutstandingReqs := 1 + cache_entry.NumOutstandingReqs;
+ assert(cache_entry.OutstandingReqs.at(getRegionOffset(address)) == false);
+ cache_entry.OutstandingReqs.at(getRegionOffset(address)) := true;
+ assert(cache_entry.NumOutstandingReqs == countBoolVec(cache_entry.OutstandingReqs));
+ } else {
+ APPEND_TRANSITION_COMMENT(" NOT incr outstanding ");
+ assert(in_msg.Type == CoherenceRequestType:RdBlkM || in_msg.Type == CoherenceRequestType:RdBlkS);
+ }
+ APPEND_TRANSITION_COMMENT(cache_entry.NumOutstandingReqs);
+        if (in_msg.Type == CoherenceRequestType:RdBlkM || in_msg.Type == CoherenceRequestType:Atomic ||
+            in_msg.Type == CoherenceRequestType:WriteThrough) {
+ cache_entry.dirty := true;
+ }
+ if (in_msg.Type == CoherenceRequestType:VicDirty ||
+ in_msg.Type == CoherenceRequestType:VicClean) {
+ DPRINTF(RubySlicc, "Got %s for addr %s\n", in_msg.Type, address);
+ //assert(cache_entry.ValidBlocks.at(getRegionOffset(address)));
+ // can in fact be inv if core got an inv after a vicclean before it got here
+ if (cache_entry.ValidBlocks.at(getRegionOffset(address))) {
+ cache_entry.clearOnDone := true;
+ cache_entry.clearOnDoneAddr := address;
+ //cache_entry.ValidBlocks.at(getRegionOffset(address)) := false;
+ //cache_entry.NumValidBlocks := cache_entry.NumValidBlocks - 1;
+ }
+ } else {
+ if (cache_entry.ValidBlocks.at(getRegionOffset(address)) == false) {
+ cache_entry.NumValidBlocks := cache_entry.NumValidBlocks + 1;
+ }
+ DPRINTF(RubySlicc, "before valid addr %s bits %s\n",
+ in_msg.Type, address, cache_entry.ValidBlocks);
+ cache_entry.ValidBlocks.at(getRegionOffset(address)) := true;
+ DPRINTF(RubySlicc, "after valid addr %s bits %s\n",
+ in_msg.Type, address, cache_entry.ValidBlocks);
+ cache_entry.UsedBlocks.at(getRegionOffset(address)) := true;
+ }
+ assert(cache_entry.NumValidBlocks <= blocksPerRegion);
+ assert(cache_entry.NumValidBlocks >= 0);
+ APPEND_TRANSITION_COMMENT(" valid blocks ");
+ APPEND_TRANSITION_COMMENT(cache_entry.ValidBlocks);
+ } else {
+ error("This shouldn't happen anymore I think");
+ //tbe.ValidBlocks.at(getRegionOffest(address)) := true;
+ assert(getState(tbe, cache_entry, address) == State:P_NP);
+ }
+ }
+ }
+
+ action(uw_updatePossibleWriteback, "uw", desc="writeback request complete") {
+ peek(unblockNetwork_in, UnblockMsg) {
+ if (is_valid(cache_entry) && in_msg.validToInvalid &&
+ cache_entry.clearOnDone && cache_entry.clearOnDoneAddr == address) {
+ DPRINTF(RubySlicc, "I have no idea what is going on here\n");
+ cache_entry.ValidBlocks.at(getRegionOffset(address)) := false;
+ cache_entry.NumValidBlocks := cache_entry.NumValidBlocks - 1;
+ cache_entry.clearOnDone := false;
+ }
+ }
+ }
+
+
+ action(rp_requestPrivate, "rp", desc="Send private request r-dir") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ // No need to send acks on replacements
+ assert(is_invalid(tbe));
+ enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
+ out_msg.addr := address; // use the actual address so the demand request can be fulfilled
+ out_msg.DemandAddress := address;
+ out_msg.Type := CoherenceRequestType:PrivateRequest;
+ out_msg.OriginalType := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.InitialRequestTime := curCycle();
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ DPRINTF(RubySlicc, "Private request %s\n", out_msg);
+ }
+ cache_entry.ProbeRequestTime := curCycle();
+ cache_entry.MsgSentToDir := true;
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ }
+ }
+
+ action(ru_requestUpgrade, "ru", desc="Send upgrade request r-dir") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ // No need to send acks on replacements
+ assert(is_invalid(tbe));
+ enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
+ out_msg.addr := address; // use the actual address so the demand request can be fulfilled
+ out_msg.Type := CoherenceRequestType:UpgradeRequest;
+ out_msg.OriginalType := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.InitialRequestTime := curCycle();
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ cache_entry.ProbeRequestTime := curCycle();
+ cache_entry.MsgSentToDir := true;
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ }
+ }
+
+ action(rw_requestWriteback, "rq", desc="Send writeback request") {
+ // No need to send acks on replacements
+ enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address); // use the region base address; this is a region-granularity writeback
+ out_msg.Type := CoherenceRequestType:CleanWbRequest;
+ out_msg.Requestor := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.Dirty := tbe.dirty;
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ }
+ }
+
+ action(rs_requestShared, "rs", desc="Send shared request r-dir") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ // No need to send acks on replacements
+ assert(is_invalid(tbe));
+ enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
+ out_msg.addr := address; // use the actual address so the demand request can be fulfilled
+ out_msg.Type := CoherenceRequestType:SharedRequest;
+ out_msg.OriginalType := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.InitialRequestTime := curCycle();
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ cache_entry.ProbeRequestTime := curCycle();
+ cache_entry.MsgSentToDir := true;
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ }
+ }
+
+ action(ai_ackRegionInv, "ai", desc="Send ack to r-dir on region inv if tbe says so") {
+ // No need to send acks on replacements
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ad_ackDirectory, "ad", desc="send probe response to directory") {
+ if (noTCCdir && tbe.MsgType == ProbeRequestType:PrbDowngrade && isOnGPU()) { //VIPER TCC doesn't understand PrbShrData
+ assert(tbe.DemandRequest); //So, let RegionBuffer take care of sending back ack
+ enqueue(responseNetwork_out, ResponseMsg, toDirLatency) {
+ out_msg.addr := tbe.DemandAddress;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := getPeer(machineID,address);
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false; // only true if sending back data, I think
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.NoAckNeeded := true;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+
+ action(aie_ackRegionExclusiveInv, "aie", desc="Send ack to r-dir on region inv if tbe says so") {
+ // No need to send acks on replacements
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.NotCached := true;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.Dirty := tbe.dirty;
+ }
+ }
+
+ action(ain_ackRegionInvNow, "ain", desc="Send ack to r-dir on region inv") {
+ enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(aine_ackRegionInvExclusiveNow, "aine", desc="Send ack to r-dir on region inv with exclusive permission") {
+ enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceResponseType:CPUPrbResp;
+ out_msg.Sender := machineID;
+ out_msg.NotCached := true;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ap_ackPrivateNotify, "ap", desc="Send ack to r-dir on private notify") {
+ enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceResponseType:PrivateAck;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(aw_ackWbNotify, "aw", desc="Send ack to r-dir on writeback notify") {
+ peek(notifyNetwork_in, CPURequestMsg) {
+ if (in_msg.NoAckNeeded == false) {
+ enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceResponseType:RegionWbAck;
+ out_msg.Sender := machineID;
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ }
+
+ action(e_evictCurrent, "e", desc="Evict this block in the region") {
+ // send force invalidate message to directory to invalidate this block
+ // must invalidate all blocks since the region buffer could have privatized it
+ if (tbe.ValidBlocks.at(getRegionOffset(address)) &&
+ (tbe.DemandRequest == false || tbe.DemandAddress != address)) {
+ DPRINTF(RubySlicc, "trying to evict address %s (base: %s, offset: %d)\n", address, getRegionBase(address), getRegionOffset(address));
+ DPRINTF(RubySlicc, "tbe valid blocks %s\n", tbe.ValidBlocks);
+
+ enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := tbe.MsgType;
+ out_msg.ReturnData := true;
+ if (address == tbe.DemandAddress) {
+ out_msg.DemandRequest := true;
+ }
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(getPeer(machineID,address));
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ APPEND_TRANSITION_COMMENT(" current ");
+ APPEND_TRANSITION_COMMENT(tbe.ValidBlocks.at(getRegionOffset(address)));
+ tbe.AllAcksReceived := false;
+ } else {
+ DPRINTF(RubySlicc, "Not evicting demand %s\n", address);
+ }
+ }
+
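+ // Eviction ordering: ed_evictDemand probes the demand block (if any)
+ // first, then the rest of the region is walked block-by-block via the
+ // trigger queue; e_evictCurrent skips the demand address so that it
+ // is not probed twice.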
+ action(ed_evictDemand, "ed", desc="Evict the demand request if it's valid") {
+ if (noTCCdir && tbe.MsgType == ProbeRequestType:PrbDowngrade && isOnGPU()) {
+ tbe.OutstandingAcks := 0;
+ tbe.AllAcksReceived := true;
+ tbe.DoneEvicting := true;
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:AcksComplete;
+ out_msg.addr := getRegionBase(address);
+ }
+ } else if (tbe.DemandRequest) {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
+ out_msg.addr := tbe.DemandAddress;
+ out_msg.Type := tbe.MsgType;
+ out_msg.ReturnData := true;
+ out_msg.DemandRequest := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.add(getPeer(machineID,address));
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ tbe.AllAcksReceived := false;
+ }
+ if (tbe.ValidBlocks.at(getRegionOffset(tbe.DemandAddress)) == false) {
+ tbe.OutstandingAcks := tbe.OutstandingAcks + 1;
+ }
+ APPEND_TRANSITION_COMMENT("Evicting demand ");
+ APPEND_TRANSITION_COMMENT(tbe.DemandAddress);
+ }
+ APPEND_TRANSITION_COMMENT("waiting acks ");
+ APPEND_TRANSITION_COMMENT(tbe.OutstandingAcks);
+ }
+
+ action(adp_AckDemandProbe, "fp", desc="forward demand probe even if we know that the core is invalid") {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ if (in_msg.DemandRequest) {
+ enqueue(responseNetwork_out, ResponseMsg, toDirLatency) {
+ out_msg.addr := in_msg.DemandAddress;
+ out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
+ out_msg.Sender := getPeer(machineID,address);
+ // will this always be ok? probably not for multisocket
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := false; // only true if sending back data, I think
+ out_msg.Hit := false;
+ out_msg.Ntsl := false;
+ out_msg.State := CoherenceState:NA;
+ out_msg.NoAckNeeded := true;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+ }
+ }
+
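+ // Eviction walk: each InvNext trigger evicts one block and enqueues
+ // the next block address until the region base changes (i.e. the
+ // region boundary is crossed); AcksComplete fires once evicting is
+ // done and all probe acks have drained.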
+ action(en_enqueueNextEvict, "en", desc="Queue evict the next block in the region") {
+ // increment the address by one block and enqueue on the trigger port
+ // Only enqueue if the next address doesn't overrun the region bound
+ if (getRegionBase(getNextBlock(address)) == getRegionBase(address)) {
+ enqueue(triggerQueue_out, TriggerMsg, nextEvictLatency) {
+ out_msg.Type := TriggerType:InvNext;
+ out_msg.addr := getNextBlock(address);
+ }
+ } else {
+ tbe.DoneEvicting := true;
+ DPRINTF(RubySlicc, "Done evicing region %s\n", getRegionBase(address));
+ DPRINTF(RubySlicc, "Waiting for %s acks\n", tbe.OutstandingAcks);
+ if (tbe.AllAcksReceived == true) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:AcksComplete;
+ out_msg.addr := getRegionBase(address);
+ }
+ }
+ }
+ }
+
+ action(ef_enqueueFirstEvict, "ef", desc="Queue the first block in the region to be evicted") {
+ if (tbe.DoneEvicting == false) {
+ enqueue(triggerQueue_out, TriggerMsg, nextEvictLatency) {
+ out_msg.Type := TriggerType:InvNext;
+ out_msg.addr := getRegionBase(address);
+ }
+ }
+ }
+
+ action(ra_receiveAck, "ra", desc="Mark TBE entry as received this ack") {
+ DPRINTF(RubySlicc, "received ack for %s reg: %s vec: %s pos: %d\n",
+ address, getRegionBase(address), tbe.ValidBlocks, getRegionOffset(address));
+ peek(unblockNetwork_in, UnblockMsg) {
+ //
+ // Note: the TBE ValidBlocks vec is a conservative (over-approximate)
+ // list of the valid blocks, since the cache entry's ValidBlocks bit
+ // is set when the request is issued, not when it completes
+ //
+ if (in_msg.wasValid) {
+ assert(tbe.ValidBlocks.at(getRegionOffset(address)));
+ }
+ }
+ tbe.OutstandingAcks := tbe.OutstandingAcks - 1;
+ tbe.AcksReceived.at(getRegionOffset(address)) := true;
+ assert(tbe.OutstandingAcks >= 0);
+ if (tbe.OutstandingAcks == 0) {
+ tbe.AllAcksReceived := true;
+ if (tbe.DoneEvicting) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:AcksComplete;
+ out_msg.addr := getRegionBase(address);
+ }
+ }
+ }
+
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ APPEND_TRANSITION_COMMENT(" Acks left receive ");
+ APPEND_TRANSITION_COMMENT(tbe.OutstandingAcks);
+ }
+
+ action(do_decrementOutstanding, "do", desc="Decrement outstanding requests") {
+ APPEND_TRANSITION_COMMENT(" decr outstanding ");
+ if (is_valid(cache_entry)) {
+ cache_entry.NumOutstandingReqs := cache_entry.NumOutstandingReqs - 1;
+ assert(cache_entry.OutstandingReqs.at(getRegionOffset(address)));
+ cache_entry.OutstandingReqs.at(getRegionOffset(address)) := false;
+ assert(cache_entry.NumOutstandingReqs >= 0);
+ assert(cache_entry.NumOutstandingReqs == countBoolVec(cache_entry.OutstandingReqs));
+ APPEND_TRANSITION_COMMENT(cache_entry.NumOutstandingReqs);
+ }
+ if (is_valid(tbe)) {
+ tbe.NumOutstandingReqs := tbe.NumOutstandingReqs - 1;
+ assert(tbe.OutstandingReqs.at(getRegionOffset(address)));
+ tbe.OutstandingReqs.at(getRegionOffset(address)) := false;
+ assert(tbe.NumOutstandingReqs >= 0);
+ assert(tbe.NumOutstandingReqs == countBoolVec(tbe.OutstandingReqs));
+ APPEND_TRANSITION_COMMENT(tbe.NumOutstandingReqs);
+ }
+ }
+
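+ // OutstandingThreshold is normally 0 ("no requests in flight"), but
+ // so_setOutstandingCheckOne raises it to 1 for the S_P upgrade case,
+ // where exactly one request is legitimately outstanding to the r-dir.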
+ action(co_checkOutstanding, "co", desc="check if there are no more outstanding requests") {
+ assert(is_valid(tbe));
+ if ((tbe.NumOutstandingReqs <= tbe.OutstandingThreshold) &&
+ (tbe.AllOutstandingTriggered == false)) {
+ APPEND_TRANSITION_COMMENT(" no more outstanding: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumOutstandingReqs);
+ APPEND_TRANSITION_COMMENT(tbe.OutstandingThreshold);
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:AllOutstanding;
+ if (tbe.DemandRequest) {
+ out_msg.addr := tbe.DemandAddress;
+ } else {
+ out_msg.addr := getRegionBase(address);
+ }
+ DPRINTF(RubySlicc, "co enqueuing %s\n", out_msg);
+ tbe.AllOutstandingTriggered := true;
+ }
+ } else {
+ APPEND_TRANSITION_COMMENT(" still more outstanding ");
+ }
+ }
+
+ action(ro_resetAllOutstanding, "ro", desc="Reset all outstanding") {
+ tbe.AllOutstandingTriggered := false;
+ }
+
+ action(so_setOutstandingCheckOne, "so", desc="Check outstanding is waiting for 1, not 0") {
+ // Need this for S_P because one request is outstanding between here and r-dir
+ tbe.OutstandingThreshold := 1;
+ }
+
+ action(a_allocateRegionEntry, "a", desc="Allocate a new entry") {
+ set_cache_entry(cacheMemory.allocate(getRegionBase(address), new Entry));
+ cache_entry.ValidBlocks.clear();
+ cache_entry.ValidBlocks.resize(blocksPerRegion);
+ cache_entry.UsedBlocks.clear();
+ cache_entry.UsedBlocks.resize(blocksPerRegion);
+ cache_entry.dirty := false;
+ cache_entry.NumOutstandingReqs := 0;
+ cache_entry.OutstandingReqs.clear();
+ cache_entry.OutstandingReqs.resize(blocksPerRegion);
+ }
+
+ action(d_deallocateRegionEntry, "d", desc="Deallocate region entry") {
+ cacheMemory.deallocate(getRegionBase(address));
+ unset_cache_entry();
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ TBEs.allocate(getRegionBase(address));
+ set_tbe(getTBE(address));
+ tbe.OutstandingAcks := 0;
+ tbe.AllAcksReceived := true; // starts true since the region could be empty
+ tbe.DoneEvicting := false;
+ tbe.AcksReceived.clear();
+ tbe.AcksReceived.resize(blocksPerRegion);
+ tbe.SendAck := false;
+ tbe.OutstandingThreshold := 0;
+ if (is_valid(cache_entry)) {
+ tbe.NumOutstandingReqs := cache_entry.NumOutstandingReqs;
+ tbe.OutstandingReqs := cache_entry.OutstandingReqs;
+ assert(tbe.NumOutstandingReqs == countBoolVec(tbe.OutstandingReqs));
+ tbe.dirty := cache_entry.dirty;
+ tbe.ValidBlocks := cache_entry.ValidBlocks;
+ tbe.OutstandingAcks := countBoolVec(tbe.ValidBlocks);
+ APPEND_TRANSITION_COMMENT(" tbe valid blocks ");
+ APPEND_TRANSITION_COMMENT(tbe.ValidBlocks);
+ APPEND_TRANSITION_COMMENT(" cache valid blocks ");
+ APPEND_TRANSITION_COMMENT(cache_entry.ValidBlocks);
+ } else {
+ tbe.dirty := false;
+ }
+ }
+
+ action(m_markSendAck, "m", desc="Mark TBE that we need to ack at end") {
+ assert(is_valid(tbe));
+ tbe.SendAck := true;
+ }
+
+ action(db_markDirtyBit, "db", desc="Mark TBE dirty bit") {
+ peek(unblockNetwork_in, UnblockMsg) {
+ if (is_valid(tbe)) {
+ tbe.dirty := tbe.dirty || in_msg.Dirty;
+ }
+ }
+ }
+
+ action(dr_markDoneAckReceived, "dr", desc="Mark TBE that a done ack has been received") {
+ assert(is_valid(tbe));
+ tbe.DoneAckReceived := true;
+ tbe.DoneAckAddr := address;
+ APPEND_TRANSITION_COMMENT(" marking done ack on TBE ");
+ }
+
+ action(se_setTBE, "se", desc="Set msg type to evict") {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ tbe.MsgType := in_msg.Type;
+ tbe.Requestor := in_msg.Requestor;
+ tbe.DemandAddress := in_msg.DemandAddress;
+ tbe.DemandRequest := in_msg.DemandRequest;
+ }
+ }
+
+ action(sne_setNewTBE, "sne", desc="Stash the incoming probe's fields in the TBE's New* fields") {
+ peek(probeNetwork_in, NBProbeRequestMsg) {
+ tbe.NewMsgType := in_msg.Type;
+ tbe.NewRequestor := in_msg.Requestor;
+ tbe.NewDemandAddress := in_msg.DemandAddress;
+ tbe.NewDemandRequest := in_msg.DemandRequest;
+ }
+ }
+
+ action(soe_setOldTBE, "soe", desc="Swap the stashed New* probe fields in and reset ack tracking") {
+ tbe.MsgType := tbe.NewMsgType;
+ tbe.Requestor := tbe.NewRequestor;
+ tbe.DemandAddress := tbe.NewDemandAddress;
+ tbe.DemandRequest := tbe.NewDemandRequest;
+ tbe.OutstandingAcks := countBoolVec(tbe.ValidBlocks);
+ tbe.AllAcksReceived := true; // starts true since the region could be empty
+ tbe.DoneEvicting := false;
+ tbe.AcksReceived.clear();
+ tbe.AcksReceived.resize(blocksPerRegion);
+ tbe.SendAck := false;
+ }
+
+ action(ser_setTBE, "ser", desc="Set msg type to evict repl") {
+ tbe.MsgType := ProbeRequestType:PrbInv;
+ }
+
+ action(md_setMustDowngrade, "md", desc="When permissions finally get here, must be shared") {
+ assert(is_valid(cache_entry));
+ cache_entry.MustDowngrade := true;
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ TBEs.deallocate(getRegionBase(address));
+ unset_tbe();
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop the request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pl_popUnblockQueue, "pl", desc="Pop the unblock queue") {
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pn_popNotifyQueue, "pn", desc="Pop the notify queue") {
+ notifyNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pp_popProbeQueue, "pp", desc="Pop the probe queue") {
+ probeNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="Pop the trigger queue") {
+ DPRINTF(RubySlicc, "Trigger Before Contents: %s\n", triggerQueue_in);
+ triggerQueue_in.dequeue(clockEdge());
+ DPRINTF(RubySlicc, "Trigger After Contents: %s\n", triggerQueue_in);
+ }
+
+ // Must always use wake all, since non-region addresses wait on region addresses
+ action(wa_wakeUpAllDependents, "wa", desc="Wake up any requests waiting for this region") {
+ wakeUpAllBuffers();
+ }
+
+ action(zz_stallAndWaitRequestQueue, "\z", desc="recycle request queue") {
+ Addr regAddr := getRegionBase(address);
+ DPRINTF(RubySlicc, "Stalling address %s\n", regAddr);
+ stall_and_wait(requestNetwork_in, regAddr);
+ }
+
+ action(yy_stallAndWaitProbeQueue, "\y", desc="stall probe queue") {
+ Addr regAddr := getRegionBase(address);
+ stall_and_wait(probeNetwork_in, regAddr);
+ }
+
+ action(yyy_recycleProbeQueue, "\yy", desc="recycle probe queue") {
+ probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(zzz_recycleRequestQueue, "\zz", desc="recycle request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(www_recycleUnblockNetwork, "\ww", desc="recycle unblock queue") {
+ unblockNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(z_stall, "z", desc="stall request queue") {
+ // Intentionally empty: stall by leaving the message at the head of the queue
+ }
+
+ action(mru_setMRU, "mru", desc="set MRU") {
+ cacheMemory.setMRU(address, cache_entry.NumValidBlocks);
+ }
+
+ // Transitions
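+ // Transient state names follow an X_Y pattern (moving from state X to
+ // state Y); as we read the transitions, a trailing _O waits for
+ // outstanding requests to drain and a trailing _W marks a wait, e.g.
+ // for a writeback ack.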
+
+ transition({NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, P_NP_W, P_NP_NP, NP_W}, {CPURead, CPUWriteback, CPUWrite}) {} {
+ zz_stallAndWaitRequestQueue;
+ }
+
+ transition(SS_P, {CPURead, CPUWriteback}) {
+ zz_stallAndWaitRequestQueue;
+ }
+
+ transition({NP, S, P, NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P, NP_W, P_NP_NP}, StallAccess) {} {
+ zz_stallAndWaitRequestQueue;
+ }
+
+ transition({S, P, NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P, P_NP_W, P_NP_NP, NP_W}, StallDoneAck) {
+ www_recycleUnblockNetwork;
+ }
+
+ transition(NP, StallDoneAck, NP_W) {
+ t_allocateTBE;
+ db_markDirtyBit;
+ dr_markDoneAckReceived;
+ pl_popUnblockQueue;
+ }
+
+ transition(NP_W, StaleRequest, NP) {
+ f_fwdReqToDir;
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ p_popRequestQueue;
+ }
+
+ transition(P_NP_O, DowngradeRegion) {} {
+ z_stall; // should stall and wait
+ }
+
+ transition({NP_PS, S_NP_PS, S_P, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P}, ReplRegion) {} {
+ zz_stallAndWaitRequestQueue; // can't let things get out of order!
+ }
+
+ transition({P_NP_O, S_O, SS_P}, InvRegion) {} {
+ yyy_recycleProbeQueue; // can't be z_stall because a RdBlkM with the sinked flag set could be in the requestQueue, blocking the inv
+ }
+
+ transition(P_NP, {InvRegion, DowngradeRegion}, P_NP_NP) {} {
+ sne_setNewTBE;
+ pp_popProbeQueue;
+ }
+
+ transition(S_P, DowngradeRegion) {} {
+ adp_AckDemandProbe;
+ ain_ackRegionInvNow;
+ pp_popProbeQueue;
+ }
+
+ transition(P_NP_W, InvRegion) {
+ adp_AckDemandProbe;
+ ain_ackRegionInvNow;
+ pp_popProbeQueue;
+ }
+
+ transition(P_NP_W, DowngradeRegion) {
+ adp_AckDemandProbe;
+ aine_ackRegionInvExclusiveNow;
+ pp_popProbeQueue;
+ }
+
+ transition({P, S}, {CPURead, CPUWriteback}) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ f_fwdReqToDir;
+ u_updateRegionEntry;
+ p_popRequestQueue;
+ }
+
+ transition(P, CPUWrite) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ f_fwdReqToDir;
+ u_updateRegionEntry;
+ p_popRequestQueue;
+ }
+
+ transition(S, CPUWrite, S_O) {TagArrayRead} {
+ mru_setMRU;
+ t_allocateTBE;
+ co_checkOutstanding;
+ zz_stallAndWaitRequestQueue;
+ }
+
+ transition(S_O, AllOutstanding, SS_P) {
+ wa_wakeUpAllDependents;
+ ro_resetAllOutstanding;
+ pt_popTriggerQueue;
+ }
+
+ transition(SS_P, CPUWrite, S_P) {
+ mru_setMRU;
+ dt_deallocateTBE;
+ ru_requestUpgrade;
+ u_updateRegionEntry;
+ p_popRequestQueue;
+ }
+
+ transition(NP, {CPURead, CPUWriteback}, NP_PS) {TagArrayRead, TagArrayWrite} {
+ a_allocateRegionEntry;
+ rs_requestShared;
+ u_updateRegionEntry;
+ p_popRequestQueue;//zz_stallAndWaitRequestQueue;
+ }
+
+ transition(NP, CPUWrite, NP_PS) {TagArrayRead, TagArrayWrite} {
+ a_allocateRegionEntry;
+ rp_requestPrivate;
+ u_updateRegionEntry;
+ p_popRequestQueue;//zz_stallAndWaitRequestQueue;
+ }
+
+ transition(NP_PS, PrivateNotify, P) {} {
+ ap_ackPrivateNotify;
+ wa_wakeUpAllDependents;
+ pn_popNotifyQueue;
+ }
+
+ transition(S_P, PrivateNotify, P) {} {
+ ap_ackPrivateNotify;
+ wa_wakeUpAllDependents;
+ pn_popNotifyQueue;
+ }
+
+ transition(NP_PS, SharedNotify, S) {} {
+ ap_ackPrivateNotify;
+ wa_wakeUpAllDependents;
+ pn_popNotifyQueue;
+ }
+
+ transition(P_NP_W, WbNotify, NP) {} {
+ aw_ackWbNotify;
+ wa_wakeUpAllDependents;
+ dt_deallocateTBE;
+ pn_popNotifyQueue;
+ }
+
+ transition({P, S}, ReplRegion, P_NP_O) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ ser_setTBE;
+ d_deallocateRegionEntry;
+ co_checkOutstanding;
+ }
+
+ transition({P, S}, InvRegion, P_NP_O) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ se_setTBE;
+ m_markSendAck;
+ d_deallocateRegionEntry;
+ co_checkOutstanding;
+ pp_popProbeQueue;
+ }
+
+ transition(P_NP_O, AllOutstanding, P_NP) {} {
+ ed_evictDemand;
+ ef_enqueueFirstEvict;
+ ro_resetAllOutstanding;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_P, InvRegion, S_NP_PS_O) {TagArrayRead} {
+ t_allocateTBE;
+ se_setTBE;
+ m_markSendAck;
+ so_setOutstandingCheckOne;
+ co_checkOutstanding;
+ pp_popProbeQueue;
+ }
+
+ transition(S_NP_PS_O, AllOutstanding, S_NP_PS) {
+ ed_evictDemand;
+ ef_enqueueFirstEvict;
+ ro_resetAllOutstanding;
+ pt_popTriggerQueue;
+ }
+
+ transition(P, DowngradeRegion, P_S_O) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ se_setTBE;
+ m_markSendAck;
+ co_checkOutstanding;
+ pp_popProbeQueue;
+ }
+
+ transition(P_S_O, AllOutstanding, P_S) {} {
+ ed_evictDemand;
+ ef_enqueueFirstEvict;
+ ro_resetAllOutstanding;
+ pt_popTriggerQueue;
+ }
+
+ transition({P, S}, DoneAck) {TagArrayWrite} {
+ do_decrementOutstanding;
+ wa_wakeUpAllDependents;
+ db_markDirtyBit;
+ uw_updatePossibleWriteback;
+ pl_popUnblockQueue;
+ }
+
+ transition({S_P, NP_PS, S_NP_PS}, DoneAck) {TagArrayWrite} {
+ www_recycleUnblockNetwork;
+ }
+
+ transition({P_NP_O, S_NP_PS_O, P_S_O, S_O}, DoneAck) {} {
+ do_decrementOutstanding;
+ co_checkOutstanding;
+ db_markDirtyBit;
+ uw_updatePossibleWriteback;
+ pl_popUnblockQueue;
+ }
+
+ transition({P_NP, P_S, S_NP_PS, P_NP_NP}, Evict) {} {
+ e_evictCurrent;
+ en_enqueueNextEvict;
+ pt_popTriggerQueue;
+ }
+
+ transition({P_NP, P_S, S_NP_PS, P_NP_NP}, InvAck) {} {
+ ra_receiveAck;
+ db_markDirtyBit;
+ pl_popUnblockQueue;
+ }
+
+ transition(P_NP, LastAck_CleanWb, P_NP_W) {} {
+ rw_requestWriteback;
+ pt_popTriggerQueue;
+ }
+
+ transition(P_NP_NP, LastAck_CleanWb, P_NP) {} {
+ soe_setOldTBE;
+ m_markSendAck;
+ ed_evictDemand;
+ ef_enqueueFirstEvict;
+ pt_popTriggerQueue;
+ }
+
+ transition(P_NP, LastAck_PrbResp, NP) {} {
+ aie_ackRegionExclusiveInv;
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_NP_PS, LastAck_PrbResp, NP_PS) {} {
+ aie_ackRegionExclusiveInv;
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(P_S, LastAck_PrbResp, S) {} {
+ ai_ackRegionInv;
+ ad_ackDirectory;
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pt_popTriggerQueue;
+ }
+
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Jason Power
+ */
+
+machine(MachineType:RegionDir, "Region Directory for AMD_Base-like protocol")
+: CacheMemory *cacheMemory; // stores only region addresses; its block size must be set to match the region parameters below
+ NodeID cpuRegionBufferNum;
+ NodeID gpuRegionBufferNum;
+ int blocksPerRegion := 64; // 4k regions
+ Cycles toDirLatency := 10; // Latency to fwd requests and send invs to directory
+ bool always_migrate := "False";
+ bool sym_migrate := "False";
+ bool asym_migrate := "False";
+ bool noTCCdir := "False";
+ int TCC_select_num_bits := 1;
+
+ // To the directory
+ MessageBuffer * requestToDir, network="To", virtual_network="5", vnet_type="request";
+
+ // To the region buffers
+ MessageBuffer * notifyToRBuffer, network="To", virtual_network="7", vnet_type="request";
+ MessageBuffer * probeToRBuffer, network="To", virtual_network="8", vnet_type="request";
+
+ // From the region buffers
+ MessageBuffer * responseFromRBuffer, network="From", virtual_network="2", vnet_type="response";
+ MessageBuffer * requestFromRegBuf, network="From", virtual_network="0", vnet_type="request";
+
+ MessageBuffer * triggerQueue;
+{
+
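+ // The region directory tracks region-granularity permissions between
+ // the CPU and GPU region buffers: a region is either private to one
+ // owner (P) or shared between both (S). Block-level coherence is left
+ // to the ordinary directory (see the forwarding actions below).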
+ // States
+ state_declaration(State, desc="Region states", default="RegionDir_State_NP") {
+ NP, AccessPermission:Invalid, desc="Not present in region directory";
+ P, AccessPermission:Invalid, desc="Region is private to owner";
+ S, AccessPermission:Invalid, desc="Region is shared between CPU and GPU";
+
+ P_NP, AccessPermission:Invalid, desc="Evicting the region";
+ NP_P, AccessPermission:Invalid, desc="Must wait for ack from R-buf";
+ NP_S, AccessPermission:Invalid, desc="Must wait for ack from R-buf";
+ P_P, AccessPermission:Invalid, desc="Waiting for ack from R-buf";
+ S_S, AccessPermission:Invalid, desc="Waiting for ack from R-buf";
+ P_S, AccessPermission:Invalid, desc="Downgrading the region";
+ S_P, AccessPermission:Invalid, desc="Upgrading the region";
+ P_AS, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
+ S_AP, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
+ P_AP, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
+
+ SP_NP_W, AccessPermission:Invalid, desc="Last sharer writing back, waiting for ack";
+ S_W, AccessPermission:Invalid, desc="Sharer writing back, waiting for ack";
+
+ P_AP_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
+ P_AS_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
+ S_AP_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
+ }
+
+ enumeration(Event, desc="Region directory events") {
+ SendInv, desc="Send inv message to any machine that has a region buffer";
+ SendUpgrade, desc="Send upgrade message to any machine that has a region buffer";
+ SendDowngrade, desc="Send downgrade message to any machine that has a region buffer";
+
+ Evict, desc="Evict this region";
+
+ UpgradeRequest, desc="Request from r-buf for an upgrade";
+ SharedRequest, desc="Request from r-buf for read";
+ PrivateRequest, desc="Request from r-buf for write";
+
+ InvAckCore, desc="Ack from region buffer to order the invalidate";
+ InvAckCoreNoShare, desc="Ack from region buffer to order the invalidate, and it does not have the region";
+ CPUPrivateAck, desc="Ack from region buffer to order private notification";
+
+ LastAck, desc="Done eviciting all the blocks";
+
+ StaleCleanWbRequest, desc="stale clean writeback reqeust";
+ StaleCleanWbRequestNoShare, desc="stale clean wb req from a cache which should be removed from sharers";
+ CleanWbRequest, desc="clean writeback reqeust, multiple sharers";
+ CleanWbRequest_LastSharer, desc="clean writeback reqeust, last sharer";
+ WritebackAck, desc="Writeback Ack from region buffer";
+ DirReadyAck, desc="Directory is ready, waiting Ack from region buffer";
+
+ TriggerInv, desc="trigger invalidate message";
+ TriggerDowngrade, desc="trigger downgrade message";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ DataArrayRead, desc="Read the data array";
+ DataArrayWrite, desc="Write the data array";
+ TagArrayRead, desc="Read the data array";
+ TagArrayWrite, desc="Write the data array";
+ }
+
+ structure(BoolVec, external="yes") {
+ bool at(int);
+ void resize(int);
+ void clear();
+ }
+
+ structure(Entry, desc="Region entry", interface="AbstractCacheEntry") {
+ Addr addr, desc="Base address of this region";
+ NetDest Sharers, desc="Set of machines that are sharing, but not owners";
+ State RegionState, desc="Region state";
+ DataBlock DataBlk, desc="Data for the block (always empty in region dir)";
+ MachineID Owner, desc="Machine which owns all blocks in this region";
+ Cycles ProbeStart, desc="Time when the first probe request was issued";
+ bool LastWriten, default="false", desc="The last time someone accessed this region, it wrote it";
+ bool LastWritenByCpu, default="false", desc="The last time the CPU accessed this region, it wrote it";
+ bool LastWritenByGpu, default="false", desc="The last time the GPU accessed this region, it wrote it";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ MachineID Owner, desc="Machine which owns all blocks in this region";
+ NetDest Sharers, desc="Set of machines to send evicts";
+ int NumValidBlocks, desc="Number of blocks valid so we don't have to count a BoolVec";
+ bool AllAcksReceived, desc="Got all necessary acks from dir";
+ CoherenceRequestType MsgType, desc="Msg type for the evicts could be inv or dwngrd";
+ Cycles ProbeRequestTime, default="Cycles(0)", desc="Start of probe request";
+ Cycles InitialRequestTime, default="Cycles(0)", desc="To forward back on out msg";
+ Addr DemandAddress, desc="Demand address from original request";
+ uint64_t probe_id, desc="probe id for lifetime profiling";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ // Stores only region addresses
+ TBETable TBEs, template="<RegionDir_TBE>", constructor="m_number_of_TBEs";
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ int blockBits, default="RubySystem::getBlockSizeBits()";
+ int blockBytes, default="RubySystem::getBlockSizeBytes()";
+ int regionBits, default="log2(m_blocksPerRegion)";
+
+ // Functions
+
+ MachineID getCoreMachine(MachineID rBuf, Addr address) {
+ if (machineIDToNodeID(rBuf) == cpuRegionBufferNum) {
+ return createMachineID(MachineType:CorePair, intToID(0));
+ } else if (machineIDToNodeID(rBuf) == gpuRegionBufferNum) {
+ if (noTCCdir) {
+ return mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits);
+ } else {
+ return createMachineID(MachineType:TCCdir, intToID(0));
+ }
+ } else {
+ error("Unexpected region buffer number");
+ }
+ }
+
+ bool isCpuMachine(MachineID rBuf) {
+ if (machineIDToNodeID(rBuf) == cpuRegionBufferNum) {
+ return true;
+ } else if (machineIDToNodeID(rBuf) == gpuRegionBufferNum) {
+ return false;
+ } else {
+ error("Unexpected region buffer number");
+ }
+ }
+
+ bool symMigrate(Entry cache_entry) {
+ return cache_entry.LastWriten;
+ }
+
+ bool asymMigrate(Entry cache_entry, MachineID requestor) {
+ if (isCpuMachine(requestor)) {
+ return cache_entry.LastWritenByCpu;
+ } else {
+ return cache_entry.LastWritenByGpu;
+ }
+ }
+
+ int getRegionOffset(Addr addr) {
+ if (blocksPerRegion > 1) {
+ Addr offset := bitSelect(addr, blockBits, regionBits+blockBits-1);
+ int ret := addressToInt(offset);
+ assert(ret < blocksPerRegion);
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+
+ Addr getRegionBase(Addr addr) {
+ return maskLowOrderBits(addr, blockBits+regionBits);
+ }
+
+ Addr getNextBlock(Addr addr) {
+ Addr a := addr;
+ makeNextStrideAddress(a, 1);
+ return a;
+ }
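+
+ // Worked example, assuming the default 64B blocks (blockBits = 6) and
+ // blocksPerRegion = 64 (regionBits = 6): for addr 0x1A40,
+ // getRegionOffset returns (0x1A40 >> 6) & 0x3F = 41, and getRegionBase
+ // masks the low 12 bits, giving the 4kB-aligned base 0x1000.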
+
+ bool presentOrAvail(Addr addr) {
+ DPRINTF(RubySlicc, "Present? %s, avail? %s\n", cacheMemory.isTagPresent(getRegionBase(addr)), cacheMemory.cacheAvail(getRegionBase(addr)));
+ return cacheMemory.isTagPresent(getRegionBase(addr)) || cacheMemory.cacheAvail(getRegionBase(addr));
+ }
+
+ // Returns a region entry!
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", cacheMemory.lookup(getRegionBase(addr)));
+ }
+
+ TBE getTBE(Addr addr), return_by_pointer="yes" {
+ return TBEs.lookup(getRegionBase(addr));
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ return getCacheEntry(getRegionBase(addr)).DataBlk;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.RegionState;
+ }
+ return State:NP;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ if (is_valid(cache_entry)) {
+ cache_entry.RegionState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := getTBE(addr);
+ if(is_valid(tbe)) {
+ return RegionDir_State_to_permission(tbe.TBEState);
+ }
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return RegionDir_State_to_permission(cache_entry.RegionState);
+ }
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(RegionDir_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ functionalMemoryRead(pkt);
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ if (functionalMemoryWrite(pkt)) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ cacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ cacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ cacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ cacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:DataArrayRead) {
+ return cacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:DataArrayWrite) {
+ return cacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:TagArrayRead) {
+ return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:TagArrayWrite) {
+ return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ out_port(requestNetwork_out, CPURequestMsg, requestToDir);
+ out_port(notifyNetwork_out, CPURequestMsg, notifyToRBuffer);
+ out_port(probeNetwork_out, NBProbeRequestMsg, probeToRBuffer);
+
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=2) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ assert(in_msg.addr == getRegionBase(in_msg.addr));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := getTBE(in_msg.addr);
+ DPRINTF(RubySlicc, "trigger msg: %s (%s)\n", in_msg, getRegionBase(in_msg.addr));
+ if (in_msg.Type == TriggerType:AcksComplete) {
+ assert(is_valid(tbe));
+ trigger(Event:LastAck, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == TriggerType:InvRegion) {
+ assert(is_valid(tbe));
+ trigger(Event:TriggerInv, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == TriggerType:DowngradeRegion) {
+ assert(is_valid(tbe));
+ trigger(Event:TriggerDowngrade, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unknown trigger message");
+ }
+ }
+ }
+ }
+
+ in_port(responseNetwork_in, ResponseMsg, responseFromRBuffer, rank=1) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ TBE tbe := getTBE(in_msg.addr);
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
+ assert(in_msg.addr == getRegionBase(in_msg.addr));
+ assert(is_valid(tbe));
+ if (in_msg.NotCached) {
+ trigger(Event:InvAckCoreNoShare, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:InvAckCore, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:PrivateAck) {
+ assert(in_msg.addr == getRegionBase(in_msg.addr));
+ assert(is_valid(cache_entry));
+ //Fix Me...add back in: assert(cache_entry.Sharers.isElement(in_msg.Sender));
+ trigger(Event:CPUPrivateAck, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:RegionWbAck) {
+ //Fix Me...add back in: assert(cache_entry.Sharers.isElement(in_msg.Sender) == false);
+ assert(in_msg.addr == getRegionBase(in_msg.addr));
+ trigger(Event:WritebackAck, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DirReadyAck) {
+ assert(is_valid(tbe));
+ trigger(Event:DirReadyAck, getRegionBase(in_msg.addr), cache_entry, tbe);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ // In from cores
+ // NOTE: We get the cache / TBE entry based on the region address,
+ // but pass the block address to the actions
+ in_port(requestNetwork_in, CPURequestMsg, requestFromRegBuf, rank=0) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ //assert(in_msg.addr == getRegionBase(in_msg.addr));
+ Addr address := getRegionBase(in_msg.addr);
+ DPRINTF(RubySlicc, "Got %s, base %s\n", in_msg.addr, address);
+ if (presentOrAvail(address)) {
+ TBE tbe := getTBE(address);
+ Entry cache_entry := getCacheEntry(address);
+ if (in_msg.Type == CoherenceRequestType:PrivateRequest) {
+ if (is_valid(cache_entry) && (cache_entry.Owner != in_msg.Requestor ||
+ getState(tbe, cache_entry, address) == State:S)) {
+ trigger(Event:SendInv, address, cache_entry, tbe);
+ } else {
+ trigger(Event:PrivateRequest, address, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:SharedRequest) {
+ if (is_invalid(cache_entry)) {
+ // If no one has ever requested this region give private permissions
+ trigger(Event:PrivateRequest, address, cache_entry, tbe);
+ } else {
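+ // Migration policy: always_migrate transfers ownership on every
+ // cross-client share; sym_migrate migrates only if the region's
+ // last access was a write; asym_migrate considers whether the
+ // requestor's side (CPU or GPU) wrote the region last.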
+ if (always_migrate ||
+ (sym_migrate && symMigrate(cache_entry)) ||
+ (asym_migrate && asymMigrate(cache_entry, in_msg.Requestor))) {
+ if (cache_entry.Sharers.count() == 1 &&
+ cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ trigger(Event:UpgradeRequest, address, cache_entry, tbe);
+ } else {
+ trigger(Event:SendInv, address, cache_entry, tbe);
+ }
+ } else { // don't migrate
+ if(cache_entry.Sharers.isElement(in_msg.Requestor) ||
+ getState(tbe, cache_entry, address) == State:S) {
+ trigger(Event:SharedRequest, address, cache_entry, tbe);
+ } else {
+ trigger(Event:SendDowngrade, address, cache_entry, tbe);
+ }
+ }
+ }
+ } else if (in_msg.Type == CoherenceRequestType:UpgradeRequest) {
+ if (is_invalid(cache_entry)) {
+ trigger(Event:PrivateRequest, address, cache_entry, tbe);
+ } else if (cache_entry.Sharers.count() == 1 && cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ trigger(Event:UpgradeRequest, address, cache_entry, tbe);
+ } else {
+ trigger(Event:SendUpgrade, address, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:CleanWbRequest) {
+ if (is_invalid(cache_entry) || cache_entry.Sharers.isElement(in_msg.Requestor) == false) {
+ trigger(Event:StaleCleanWbRequest, address, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "wb address %s(%s) owner %s sharers %s requestor %s %d %d\n", in_msg.addr, getRegionBase(in_msg.addr), cache_entry.Owner, cache_entry.Sharers, in_msg.Requestor, cache_entry.Sharers.isElement(in_msg.Requestor), cache_entry.Sharers.count());
+ if (cache_entry.Sharers.isElement(in_msg.Requestor) && cache_entry.Sharers.count() == 1) {
+ DPRINTF(RubySlicc, "last wb\n");
+ trigger(Event:CleanWbRequest_LastSharer, address, cache_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "clean wb\n");
+ trigger(Event:CleanWbRequest, address, cache_entry, tbe);
+ }
+ }
+ } else {
+ error("unknown region dir request type");
+ }
+ } else {
+ Addr victim := cacheMemory.cacheProbe(getRegionBase(in_msg.addr));
+ TBE victim_tbe := getTBE(victim);
+ Entry victim_entry := getCacheEntry(victim);
+ DPRINTF(RubySlicc, "Evicting address %s for new region at address %s(%s)\n", victim, in_msg.addr, getRegionBase(in_msg.addr));
+ assert(is_valid(victim_entry));
+ trigger(Event:Evict, victim, victim_entry, victim_tbe);
+ }
+ }
+ }
+ }
+
+ // Actions
+
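+ // Four forwarding variants follow: plain vs. ForceShared, each with
+ // or without requiring an ack back from the directory. The Acks count
+ // tells the directory how many probe responses to expect from the
+ // current sharers.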
+ action(f_fwdReqToDir, "f", desc="Forward CPU request to directory") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
+ out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
+ out_msg.Type := in_msg.OriginalType;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Private := in_msg.Private;
+ out_msg.NoAckNeeded := true;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ProbeRequestStartTime := curCycle();
+ out_msg.DemandRequest := true;
+ if (is_valid(cache_entry) && getState(tbe, cache_entry, address) != State:S) {
+ out_msg.Acks := cache_entry.Sharers.count();
+ } else {
+ out_msg.Acks := 0;
+ }
+ }
+ }
+ }
+
+ action(f_fwdReqToDirShared, "fs", desc="Forward CPU request to directory (shared)") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
+ out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
+ out_msg.Type := in_msg.OriginalType;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Private := in_msg.Private;
+ out_msg.NoAckNeeded := true;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ProbeRequestStartTime := curCycle();
+ out_msg.DemandRequest := true;
+ out_msg.ForceShared := true;
+ if (is_valid(cache_entry) && getState(tbe, cache_entry, address) != State:S) {
+ out_msg.Acks := cache_entry.Sharers.count();
+ } else {
+ out_msg.Acks := 0;
+ }
+ }
+ }
+ }
+
+ action(f_fwdReqToDirWithAck, "fa", desc="Forward CPU request to directory with ack request") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
+ out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
+ out_msg.Type := in_msg.OriginalType;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Private := in_msg.Private;
+ out_msg.NoAckNeeded := false;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ProbeRequestStartTime := curCycle();
+ out_msg.DemandRequest := true;
+ if (is_valid(cache_entry)) {
+ out_msg.Acks := cache_entry.Sharers.count();
+ // Don't need an ack from the requestor!
+ if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ } else {
+ out_msg.Acks := 0;
+ }
+ }
+ }
+ }
+
+ action(f_fwdReqToDirWithAckShared, "fas", desc="Forward CPU request to directory with ack request") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
+ out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
+ out_msg.Type := in_msg.OriginalType;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.Requestor := getCoreMachine(in_msg.Requestor,address);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
+ out_msg.Shared := in_msg.Shared;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.Private := in_msg.Private;
+ out_msg.NoAckNeeded := false;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ProbeRequestStartTime := curCycle();
+ out_msg.DemandRequest := true;
+ out_msg.ForceShared := true;
+ if (is_valid(cache_entry)) {
+ out_msg.Acks := cache_entry.Sharers.count();
+ // Don't need an ack from the requestor!
+ if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ } else {
+ out_msg.Acks := 0;
+ }
+ }
+ }
+ }
+
+ action(a_allocateRegionEntry, "a", desc="Allocate a new entry") {
+ set_cache_entry(cacheMemory.allocate(getRegionBase(address), new Entry));
+ peek(requestNetwork_in, CPURequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
+ }
+
+ action(d_deallocateRegionEntry, "d", desc="Deallocate region entry") {
+ cacheMemory.deallocate(getRegionBase(address));
+ unset_cache_entry();
+ }
+
+ action(ra_receiveAck, "ra", desc="Mark TBE entry as received this ack") {
+ //assert(tbe.ValidBlocks.at(getRegionOffset(address)));
+ DPRINTF(RubySlicc, "received ack for %s reg: %s\n", address, getRegionBase(address));
+ tbe.NumValidBlocks := tbe.NumValidBlocks - 1;
+ assert(tbe.NumValidBlocks >= 0);
+ if (tbe.NumValidBlocks == 0) {
+ tbe.AllAcksReceived := true;
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:AcksComplete;
+ out_msg.addr := address;
+ }
+ }
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ APPEND_TRANSITION_COMMENT(" Acks left receive ");
+ APPEND_TRANSITION_COMMENT(tbe.NumValidBlocks);
+ }
+
+ action(ca_checkAcks, "ca", desc="Check to see if we need more acks") {
+ if (tbe.NumValidBlocks == 0) {
+ tbe.AllAcksReceived := true;
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:AcksComplete;
+ out_msg.addr := address;
+ }
+ }
+ }
+
+ action(ti_triggerInv, "ti", desc="Enqueue a trigger to invalidate this region") {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:InvRegion;
+ out_msg.addr := address;
+ }
+ }
+
+ action(td_triggerDowngrade, "td", desc="Enqueue a trigger to downgrade this region") {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.Type := TriggerType:DowngradeRegion;
+ out_msg.addr := address;
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ TBEs.allocate(getRegionBase(address));
+ set_tbe(getTBE(address));
+ if (is_valid(cache_entry)) {
+ tbe.Owner := cache_entry.Owner;
+ tbe.Sharers := cache_entry.Sharers;
+ tbe.AllAcksReceived := true; // assume no acks are required
+ }
+ tbe.ProbeRequestTime := curCycle();
+ peek(requestNetwork_in, CPURequestMsg) {
+ tbe.InitialRequestTime := in_msg.InitialRequestTime;
+ tbe.DemandAddress := in_msg.addr;
+ }
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ APPEND_TRANSITION_COMMENT(" Acks left ");
+ APPEND_TRANSITION_COMMENT(tbe.NumValidBlocks);
+ APPEND_TRANSITION_COMMENT(" Owner, ");
+ APPEND_TRANSITION_COMMENT(tbe.Owner);
+ APPEND_TRANSITION_COMMENT(" sharers, ");
+ APPEND_TRANSITION_COMMENT(tbe.Sharers);
+ }
+
+ action(ss_setSharers, "ss", desc="Add requestor to sharers") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ cache_entry.Sharers.add(in_msg.Requestor);
+ APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
+ }
+ }
+
+ action(rs_removeSharer, "rs", desc="Remove requestor from sharers") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ cache_entry.Sharers.remove(in_msg.Requestor);
+ APPEND_TRANSITION_COMMENT(" removing ");
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ APPEND_TRANSITION_COMMENT(" sharers ");
+ APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
+ }
+ }
+
+ action(rsr_removeSharerResponse, "rsr", desc="Remove responding sender from sharers") {
+ peek(responseNetwork_in, ResponseMsg) {
+ cache_entry.Sharers.remove(in_msg.Sender);
+ APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
+ }
+ }
+
+ action(cs_clearSharers, "cs", desc="Clear the sharers list") {
+ cache_entry.Sharers.clear();
+ }
+
+ action(so_setOwner, "so", desc="Set the owner to the requestor") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ cache_entry.Owner := in_msg.Requestor;
+ APPEND_TRANSITION_COMMENT(" Owner now: ");
+ APPEND_TRANSITION_COMMENT(cache_entry.Owner);
+ }
+ }
+
+ action(rr_removeRequestorFromTBE, "rr", desc="Remove requestor from TBE sharers") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ tbe.Sharers.remove(in_msg.Requestor);
+ }
+ }
+
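+ // The LastWriten* bits recorded below feed the symMigrate/asymMigrate
+ // predicates above: a shared read from a non-sharer clears them, while
+ // private/upgrade requests and dirty writebacks set them.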
+ action(ur_updateDirtyStatusOnRequest, "ur", desc="Update dirty status on demand request") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (is_valid(cache_entry)) {
+ if ((in_msg.Type == CoherenceRequestType:SharedRequest) &&
+ (cache_entry.Sharers.isElement(in_msg.Requestor) == false)) {
+ cache_entry.LastWriten := false;
+ if (isCpuMachine(in_msg.Requestor)) {
+ cache_entry.LastWritenByCpu := false;
+ } else {
+ cache_entry.LastWritenByGpu := false;
+ }
+ } else if ((in_msg.Type == CoherenceRequestType:PrivateRequest) ||
+ (in_msg.Type == CoherenceRequestType:UpgradeRequest)) {
+ cache_entry.LastWriten := true;
+ if (isCpuMachine(in_msg.Requestor)) {
+ cache_entry.LastWritenByCpu := true;
+ } else {
+ cache_entry.LastWritenByGpu := true;
+ }
+ }
+ }
+ }
+ }
+
+ action(ud_updateDirtyStatusWithWb, "ud", desc="Update dirty status on writeback") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (is_valid(cache_entry) && in_msg.Dirty) {
+ cache_entry.LastWriten := true;
+ if (isCpuMachine(in_msg.Requestor)) {
+ cache_entry.LastWritenByCpu := true;
+ } else {
+ cache_entry.LastWritenByGpu := true;
+ }
+ }
+ }
+ }
+
+ action(sns_setNumAcksSharers, "sns", desc="Set number of acks to one per shared region buffer") {
+ assert(is_valid(tbe));
+ assert(is_valid(cache_entry));
+ tbe.NumValidBlocks := tbe.Sharers.count();
+ }
+
+ action(sno_setNumAcksOne, "sno", desc="Set number of acks to one") {
+ assert(is_valid(tbe));
+ assert(is_valid(cache_entry));
+ tbe.NumValidBlocks := 1;
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ TBEs.deallocate(getRegionBase(address));
+ APPEND_TRANSITION_COMMENT(" reg: ");
+ APPEND_TRANSITION_COMMENT(getRegionBase(address));
+ unset_tbe();
+ }
+
+ action(wb_sendWbNotice, "wb", desc="Send notice to cache that writeback is acknowledged") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(notifyNetwork_out, CPURequestMsg, 1) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceRequestType:WbNotify;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ }
+ }
+ }
+
+ action(wbn_sendWbNoticeNoAck, "wbn", desc="Send notice to cache that writeback is acknowledged (no ack needed)") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(notifyNetwork_out, CPURequestMsg, 1) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceRequestType:WbNotify;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.NoAckNeeded := true;
+ }
+ }
+ }
+
+ action(b_sendPrivateNotice, "b", desc="Send notice to private cache that it has private access") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(notifyNetwork_out, CPURequestMsg, 1) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceRequestType:PrivateNotify;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ }
+ }
+ }
+
+ action(bs_sendSharedNotice, "bs", desc="Send notice to private cache that it has shared access") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(notifyNetwork_out, CPURequestMsg, 1) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceRequestType:SharedNotify;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ }
+ }
+ }
+
+ action(c_sendSharedNoticeToOrigReq, "c", desc="Send notice to private cache that it has shared access") {
+ assert(is_valid(tbe));
+ enqueue(notifyNetwork_out, CPURequestMsg, 1) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceRequestType:SharedNotify;
+ out_msg.Destination.add(tbe.Owner);
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestTime;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ APPEND_TRANSITION_COMMENT("dest: ");
+ APPEND_TRANSITION_COMMENT(out_msg.Destination);
+ }
+ }
+
+ action(sp_sendPrivateNoticeToOrigReq, "sp", desc="Send notice to private cache that it has private access") {
+ assert(is_valid(tbe));
+ enqueue(notifyNetwork_out, CPURequestMsg, 1) {
+ out_msg.addr := getRegionBase(address);
+ out_msg.Type := CoherenceRequestType:PrivateNotify;
+ out_msg.Destination.add(tbe.Owner);
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestTime;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ APPEND_TRANSITION_COMMENT("dest: ");
+ APPEND_TRANSITION_COMMENT(out_msg.Destination);
+ }
+ }
+
+ action(i_RegionInvNotify, "i", desc="Send notice to private cache that it no longer has private access") {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.DemandAddress := tbe.DemandAddress;
+ //out_msg.Requestor := tbe.Requestor;
+ out_msg.Requestor := machineID;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ //Fix me: assert(tbe.Sharers.count() > 0);
+ out_msg.DemandRequest := true;
+ out_msg.Destination := tbe.Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ APPEND_TRANSITION_COMMENT("dest: ");
+ APPEND_TRANSITION_COMMENT(out_msg.Destination);
+ }
+ }
+
+ action(i0_RegionInvNotifyDemand0, "i0", desc="Send notice to private cache that it no longer has private access") {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ // Demand address should default to 0 -> out_msg.DemandAddress := 0;
+ out_msg.Requestor := machineID;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.Destination := tbe.Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ APPEND_TRANSITION_COMMENT("dest: ");
+ APPEND_TRANSITION_COMMENT(out_msg.Destination);
+ }
+ }
+
+ action(rd_RegionDowngrade, "rd", desc="Send notice to private cache that it only has shared access") {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.DemandAddress := tbe.DemandAddress;
+ out_msg.Requestor := machineID;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.DemandRequest := true;
+ out_msg.Destination := tbe.Sharers;
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ APPEND_TRANSITION_COMMENT("dest: ");
+ APPEND_TRANSITION_COMMENT(out_msg.Destination);
+ }
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop the request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="Pop the trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="Pop the response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(s_stallAndWaitRequest, "s", desc="Stall and wait on the region address") {
+ Addr regAddr := getRegionBase(address);
+ stall_and_wait(requestNetwork_in, regAddr);
+ }
+
+ action(w_wakeUpRegionDependents, "w", desc="Wake up any requests waiting for this region") {
+ wakeUpBuffers(getRegionBase(address));
+ }
+
+ action(wa_wakeUpAllDependents, "wa", desc="Wake up any requests waiting for this region") {
+ wakeUpAllBuffers();
+ }
+
+ action(zz_recycleRequestQueue, "\z", desc="Recycle the request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(z_stall, "z", desc="stall; leave the message in its queue") {
+ // Intentionally empty: the message stays at the head of its queue.
+ }
+
+ action(mru_setMRU, "mru", desc="set MRU") {
+ cacheMemory.setMRU(address);
+ }
+
+ // Transitions
+
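+ // Each transition names its start state(s), triggering event(s), and an
+ // optional next state. The first brace group lists the tag-array accesses
+ // the transition performs (used for resource checks and stats); the
+ // second is the action sequence, executed atomically.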
+ transition({NP_P, P_P, NP_S, S_S, S_P, P_S, P_NP, S_AP, P_AS, P_AP, SP_NP_W, S_W, P_AP_W, P_AS_W, S_AP_W}, {PrivateRequest, SharedRequest, UpgradeRequest, SendInv, SendUpgrade, SendDowngrade, CleanWbRequest, CleanWbRequest_LastSharer, StaleCleanWbRequest}) {
+ s_stallAndWaitRequest
+ }
+
+ transition({NP_P, P_P, NP_S, S_S, S_P, S_W, P_S, P_NP, S_AP, P_AS, P_AP, P_AP_W, P_AS_W, S_AP_W}, Evict) {
+ zz_recycleRequestQueue;
+ }
+
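+ // Private request to an untracked region: allocate the region entry and a
+ // TBE, forward the demand request to the directory, send a PrivateNotify
+ // to the requestor, and record it as owner and sole sharer. The
+ // CPUPrivateAck transition below completes the move to P.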
+ transition(NP, {PrivateRequest, SendUpgrade}, NP_P) {TagArrayRead, TagArrayWrite} {
+ a_allocateRegionEntry;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDir;
+ b_sendPrivateNotice;
+ so_setOwner;
+ ss_setSharers;
+ t_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(P, {PrivateRequest, UpgradeRequest}, P_P) {TagArrayRead} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDir;
+ b_sendPrivateNotice;
+ t_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition({NP_P, P_P}, CPUPrivateAck, P) {
+ dt_deallocateTBE;
+ w_wakeUpRegionDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({NP, P, S}, StaleCleanWbRequest) {TagArrayRead, TagArrayWrite} {
+ wbn_sendWbNoticeNoAck;
+ ud_updateDirtyStatusWithWb;
+ p_popRequestQueue;
+ }
+
+ transition(NP, SharedRequest, NP_S) {TagArrayRead, TagArrayWrite} {
+ a_allocateRegionEntry;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirShared;
+ bs_sendSharedNotice;
+ so_setOwner;
+ ss_setSharers;
+ t_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ // Could probably do this in parallel with other shared requests
+ transition(S, SharedRequest, S_S) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirShared;
+ bs_sendSharedNotice;
+ ss_setSharers;
+ t_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition({P, S}, CleanWbRequest_LastSharer, SP_NP_W) {TagArrayRead, TagArrayWrite} {
+ ud_updateDirtyStatusWithWb;
+ wb_sendWbNotice;
+ rs_removeSharer;
+ t_allocateTBE;
+ d_deallocateRegionEntry;
+ p_popRequestQueue;
+ }
+
+ transition(S, CleanWbRequest, S_W) {TagArrayRead, TagArrayWrite} {
+ ud_updateDirtyStatusWithWb;
+ wb_sendWbNotice;
+ rs_removeSharer;
+ t_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(SP_NP_W, WritebackAck, NP) {
+ dt_deallocateTBE;
+ w_wakeUpRegionDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(S_W, WritebackAck, S) {
+ dt_deallocateTBE;
+ w_wakeUpRegionDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({NP_S, S_S}, CPUPrivateAck, S) {
+ dt_deallocateTBE;
+ w_wakeUpRegionDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(S, UpgradeRequest, S_P) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDir;
+ b_sendPrivateNotice;
+ so_setOwner;
+ t_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(S_P, CPUPrivateAck, P) {
+ dt_deallocateTBE;
+ w_wakeUpRegionDependents;
+ pr_popResponseQueue;
+ }
+
+ transition(P, SendInv, P_AP_W) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirWithAck;
+ so_setOwner;
+ t_allocateTBE;
+ rr_removeRequestorFromTBE;
+ sns_setNumAcksSharers;
+ cs_clearSharers;
+ ss_setSharers;
+ //i_RegionInvNotify;
+ p_popRequestQueue;
+ }
+
+ transition({P_AP_W, S_AP_W}, DirReadyAck) {
+ ti_triggerInv;
+ pr_popResponseQueue;
+ }
+
+ transition(P_AS_W, DirReadyAck) {
+ td_triggerDowngrade;
+ pr_popResponseQueue;
+ }
+
+ transition(P_AS_W, TriggerDowngrade, P_AS) {
+ rd_RegionDowngrade;
+ pt_popTriggerQueue;
+ }
+
+ transition(P_AP_W, TriggerInv, P_AP) {
+ i_RegionInvNotify;
+ pt_popTriggerQueue;
+ }
+
+ transition(S_AP_W, TriggerInv, S_AP) {
+ i_RegionInvNotify;
+ pt_popTriggerQueue;
+ }
+
+ transition(P, SendUpgrade, P_AP_W) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirWithAck;
+ so_setOwner;
+ t_allocateTBE;
+ rr_removeRequestorFromTBE;
+ sns_setNumAcksSharers;
+ cs_clearSharers;
+ ss_setSharers;
+ p_popRequestQueue;
+ }
+
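+ // Region eviction: invalidate every sharer with a demand-less probe
+ // (DemandAddress left at its default of 0), drop the region entry
+ // immediately, and hold the TBE until LastAck has collected one ack per
+ // sharer.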
+ transition(P, Evict, P_NP) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ sns_setNumAcksSharers;
+ i0_RegionInvNotifyDemand0;
+ d_deallocateRegionEntry;
+ }
+
+ transition(S, SendInv, P_AP_W) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirWithAck;
+ so_setOwner;
+ t_allocateTBE;
+ rr_removeRequestorFromTBE;
+ sns_setNumAcksSharers;
+ cs_clearSharers;
+ ss_setSharers;
+ p_popRequestQueue;
+ }
+
+ transition(S, Evict, P_NP) {TagArrayRead, TagArrayWrite} {
+ t_allocateTBE;
+ sns_setNumAcksSharers;
+ i0_RegionInvNotifyDemand0;
+ d_deallocateRegionEntry;
+ }
+
+ transition(P_NP, LastAck, NP) {
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(S, SendUpgrade, S_AP_W) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirWithAck;
+ so_setOwner;
+ t_allocateTBE;
+ rr_removeRequestorFromTBE;
+ sns_setNumAcksSharers;
+ cs_clearSharers;
+ ss_setSharers;
+ p_popRequestQueue;
+ }
+
+ transition(S_AP, LastAck, S_P) {
+ sp_sendPrivateNoticeToOrigReq;
+ pt_popTriggerQueue;
+ }
+
+ transition(P_AP, LastAck, P_P) {
+ sp_sendPrivateNoticeToOrigReq;
+ pt_popTriggerQueue;
+ }
+
+ transition(P, SendDowngrade, P_AS_W) {TagArrayRead, TagArrayWrite} {
+ mru_setMRU;
+ ur_updateDirtyStatusOnRequest;
+ f_fwdReqToDirWithAckShared;
+ so_setOwner;
+ t_allocateTBE;
+ sns_setNumAcksSharers;
+ ss_setSharers; //why do we set the sharers before sending the downgrade? Are we sending a downgrade to the requestor?
+ p_popRequestQueue;
+ }
+
+ transition(P_AS, LastAck, P_S) {
+ c_sendSharedNoticeToOrigReq;
+ pt_popTriggerQueue;
+ }
+
+ transition(P_S, CPUPrivateAck, S) {
+ dt_deallocateTBE;
+ w_wakeUpRegionDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({P_NP, P_AS, S_AP, P_AP}, InvAckCore) {} {
+ ra_receiveAck;
+ pr_popResponseQueue;
+ }
+
+ transition({P_NP, S_AP, P_AP}, InvAckCoreNoShare) {} {
+ ra_receiveAck;
+ pr_popResponseQueue;
+ }
+
+ transition(P_AS, InvAckCoreNoShare) {} {
+ ra_receiveAck;
+ rsr_removeSharerResponse;
+ pr_popResponseQueue;
+ }
+
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+machine(MachineType:Directory, "AMD Baseline protocol")
+: DirectoryMemory * directory;
+ CacheMemory * L3CacheMemory;
+ Cycles response_latency := 5;
+ Cycles l3_hit_latency := 50;
+ bool noTCCdir := "False";
+ bool CPUonly := "False";
+ int TCC_select_num_bits;
+ bool useL3OnWT := "False";
+ Cycles to_memory_controller_latency := 1;
+
+ // From the Cores
+ MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response";
+ MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock";
+
+ MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request";
+ MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response";
+
+ MessageBuffer * triggerQueue;
+ MessageBuffer * L3triggerQueue;
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_U") {
+ U, AccessPermission:Backing_Store, desc="unblocked";
+ BL, AccessPermission:Busy, desc="got L3 WB request";
+ // BL is Busy because the only valid copy of the data may be in flight in
+ // the writeback: the L3 may have sent it and moved on, possibly ending up
+ // in the I state.
+ BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
+ BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B, AccessPermission:Backing_Store, desc="sent response, Blocked til ack";
+ }
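+
+ // State-name convention above: BS_/BM_/B_ prefixes track whether the
+ // eventual response will be Shared, Modified, or Exclusive-or-Shared;
+ // suffix M waits on memory only, PM on probes and memory, Pm on probes
+ // with the memory data already in hand.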
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ // CPU requests
+ RdBlkS, desc="...";
+ RdBlkM, desc="...";
+ RdBlk, desc="...";
+ CtoD, desc="...";
+ WriteThrough, desc="WriteThrough Message";
+ Atomic, desc="Atomic Message";
+
+ // writebacks
+ VicDirty, desc="...";
+ VicClean, desc="...";
+ CPUData, desc="WB data from CPU";
+ StaleWB, desc="Notification that WB has been superseded by a probe";
+
+ // probe responses
+ CPUPrbResp, desc="Probe Response Msg";
+
+ ProbeAcksComplete, desc="Probe Acks Complete";
+
+ L3Hit, desc="Hit in L3 return data to core";
+
+ // Memory Controller
+ MemData, desc="Fetched data from memory arrives";
+ WBAck, desc="Writeback Ack from memory arrives";
+
+ CoreUnblock, desc="Core received data, unblock";
+ UnblockWriteThrough, desc="Unblock because of writethrough request finishing";
+
+ StaleVicDirty, desc="Core invalidated before VicDirty processed";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ L3DataArrayRead, desc="Read the data array";
+ L3DataArrayWrite, desc="Write the data array";
+ L3TagArrayRead, desc="Read the tag array";
+ L3TagArrayWrite, desc="Write the tag array";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore";
+ }
+
+ structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
+ DataBlock DataBlk, desc="data for the block";
+ MachineID LastSender, desc="Mach which this block came from";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ int NumPendingAcks, desc="num acks expected";
+ MachineID OriginalRequestor, desc="Original Requestor";
+ MachineID WTRequestor, desc="WT Requestor";
+ bool Cached, desc="data hit in Cache";
+ bool MemData, desc="Got MemData?",default="false";
+ bool wtData, desc="Got write through data?",default="false";
+ bool atomicData, desc="Got Atomic op?",default="false";
+ Cycles InitialRequestTime, desc="...";
+ Cycles ForwardRequestTime, desc="...";
+ Cycles ProbeRequestStartTime, desc="...";
+ MachineID LastSender, desc="Mach which this block came from";
+ bool L3Hit, default="false", desc="Was this an L3 hit?";
+ uint64_t probe_id, desc="probe id for lifetime profiling";
+ WriteMask writeMask, desc="outstanding write through mask";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+
+ if (is_valid(dir_entry)) {
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if (is_valid(tbe) && tbe.MemData) {
+ DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
+ return tbe.DataBlk;
+ }
+ DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
+ return getDirectoryEntry(addr).DataBlk;
+ }
+
+ State getState(TBE tbe, CacheEntry entry, Addr addr) {
+ return getDirectoryEntry(addr).DirectoryState;
+ }
+
+ void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
+ getDirectoryEntry(addr).DirectoryState := state;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if (is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if (is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes
+ + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ // For this Directory, all permissions are tracked in the Directory
+ // itself. A block cannot be in a TBE without also being in the Directory,
+ // so all state is kept in one place.
+ if (directory.isPresent(addr)) {
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(CacheEntry entry, Addr addr, State state) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L3DataArrayRead) {
+ L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L3DataArrayWrite) {
+ L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L3TagArrayRead) {
+ L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L3TagArrayWrite) {
+ L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L3DataArrayRead) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L3DataArrayWrite) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L3TagArrayRead) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L3TagArrayWrite) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ // ** OUT_PORTS **
+ out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
+ out_port(responseNetwork_out, ResponseMsg, responseToCore);
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+ out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
+
+ // ** IN_PORTS **
+
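+ // The rank values order these in_ports by service priority: the trigger
+ // and response queues rank above the request queue, so internal
+ // completion events drain before new core requests are accepted.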
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == TriggerType:AcksComplete) {
+ trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
+ trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
+ } else {
+ error("Unknown trigger msg");
+ }
+ }
+ }
+ }
+
+ in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=4) {
+ if (L3TriggerQueue_in.isReady(clockEdge())) {
+ peek(L3TriggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == TriggerType:L3Hit) {
+ trigger(Event:L3Hit, in_msg.addr, entry, tbe);
+ } else {
+ error("Unknown trigger msg");
+ }
+ }
+ }
+ }
+
+ // Unblock Network
+ in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=3) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, UnblockMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
+ }
+ }
+ }
+
+ // Core response network
+ in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=2) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
+ trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:CPUData) {
+ trigger(Event:CPUData, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
+ trigger(Event:StaleWB, in_msg.addr, entry, tbe);
+ } else {
+ error("Unexpected response type");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=1) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:MemData, in_msg.addr, entry, tbe);
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:Atomic) {
+ trigger(Event:Atomic, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
+ trigger(Event:VicDirty, in_msg.addr, entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
+ trigger(Event:VicClean, in_msg.addr, entry, tbe);
+ }
+ } else {
+ error("Bad request message type");
+ }
+ }
+ }
+ }
+
+ // Actions
+ action(s_sendResponseS, "s", desc="send Shared response") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Cached) {
+ out_msg.State := CoherenceState:Shared;
+ } else {
+ out_msg.State := CoherenceState:Exclusive;
+ }
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(m_sendResponseM, "m", desc="send Modified response") {
+ if (tbe.wtData) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+ } else {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := false;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ if (tbe.atomicData) {
+ out_msg.WTRequestor := tbe.WTRequestor;
+ }
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ if (tbe.atomicData) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+ }
+ }
+ }
+
+ action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := true;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(l_queueMemWBReq, "lq", desc="Write WB data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
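+ // On a read, an L3 tag hit is modeled by scheduling an L3Hit trigger
+ // after l3_hit_latency instead of issuing a memory read; the block is
+ // copied into the TBE and the L3 entry is deallocated, so the L3
+ // effectively acts as a victim cache for writebacks.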
+ action(l_queueMemRdReq, "lr", desc="Read data from memory") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ if (tbe.Dirty == false) {
+ tbe.DataBlk := entry.DataBlk;
+ }
+ tbe.LastSender := entry.LastSender;
+ tbe.L3Hit := true;
+ tbe.MemData := true;
+ L3CacheMemory.deallocate(address);
+ } else {
+ queueMemoryRead(machineID, address, to_memory_controller_latency);
+ }
+ }
+ }
+
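+ // The three probe actions below share a pattern: broadcast to all
+ // CorePairs, add the relevant TCC or TCCdir node, remove the requestor,
+ // then set NumPendingAcks from Destination.count() (note this supersedes
+ // the earlier increment in sc's TCCdir branch). An empty destination set
+ // fires AcksComplete immediately.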
+ action(dc_probeInvCoreData, "dc", desc="probe inv cores, return data") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+
+ // add relevant TCC node to list. This replaces all TCPs and SQCs
+ if (((in_msg.Type == CoherenceRequestType:WriteThrough ||
+ in_msg.Type == CoherenceRequestType:Atomic) &&
+ in_msg.NoWriteConflict) ||
+ CPUonly) {
+ } else if (noTCCdir) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ } else {
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ }
+ out_msg.Destination.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ APPEND_TRANSITION_COMMENT(" dc: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
+ peek(requestNetwork_in, CPURequestMsg) { // not the right network?
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+ // add relevant TCC node to the list. This replaces all TCPs and SQCs
+ if (noTCCdir || CPUonly) {
+ //Don't need to notify TCC about reads
+ } else {
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:TCCdir,
+ TCC_select_low_bit, TCC_select_num_bits));
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ }
+ if (noTCCdir && !CPUonly) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ }
+ out_msg.Destination.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
+ peek(requestNetwork_in, CPURequestMsg) { // not the right network?
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := false;
+ out_msg.MessageSize := MessageSizeType:Control;
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+
+ // add relevant TCC node to the list. This replaces all TCPs and SQCs
+ if (noTCCdir && !CPUonly) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ } else {
+ if (!noTCCdir) {
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:TCCdir,
+ TCC_select_low_bit,
+ TCC_select_num_bits));
+ }
+ }
+ out_msg.Destination.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(d_writeDataToMemory, "d", desc="Write data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
+ if (tbe.Dirty == false) {
+ // have to update the TBE, too, because of how this
+ // directory deals with functional writes
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.wtData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ if (in_msg.Type == CoherenceRequestType:Atomic) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.atomicData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
+ tbe.Dirty := true;
+ }
+ tbe.OriginalRequestor := in_msg.Requestor;
+ tbe.NumPendingAcks := 0;
+ tbe.Cached := in_msg.ForceShared;
+ tbe.InitialRequestTime := in_msg.InitialRequestTime;
+ }
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ if (tbe.Dirty == false) {
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ }
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
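+ // Write the block back to the directory copy, by case: write-through data
+ // merges partially under its mask, an atomic applies the pending
+ // operation to the memory copy, and clean data is simply installed. Dirty
+ // probe data is not written back here, presumably because the requestor
+ // now owns the line.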
+ action(wd_writeBackData, "wd", desc="Write back data if needed") {
+ if (tbe.wtData) {
+ getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.writeMask);
+ } else if (tbe.atomicData) {
+ tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask);
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ } else if (tbe.Dirty == false) {
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ }
+ }
+
+ action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
+ peek(memQueue_in, MemoryMsg) {
+ if (tbe.wtData == true) {
+ // do nothing
+ } else if (tbe.Dirty == false) {
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk;
+ }
+ tbe.MemData := true;
+ }
+ }
+
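+ // Merge a dirty probe response into the TBE: pending write-through bytes
+ // take precedence (probe data is overlaid under the write mask), a second
+ // dirty response must match the first, and otherwise the first dirty
+ // responder's data and identity are recorded.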
+ action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ if (tbe.wtData) {
+ DataBlock tmp := in_msg.DataBlk;
+ tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
+ tbe.DataBlk := tmp;
+ tbe.writeMask.fillMask();
+ } else if (tbe.Dirty) {
+ if (tbe.atomicData == false && tbe.wtData == false) {
+ DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
+ assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
+ }
+ } else {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ tbe.LastSender := in_msg.Sender;
+ }
+ }
+ if (in_msg.Hit) {
+ tbe.Cached := true;
+ }
+ }
+ }
+
+ action(mwc_markSinkWriteCancel, "mwc", desc="Mark to sink impending VicDirty") {
+ peek(responseNetwork_in, ResponseMsg) {
+ getDirectoryEntry(address).VicDirtyIgnore.add(in_msg.Sender);
+ APPEND_TRANSITION_COMMENT(" setting bit to sink VicDirty ");
+ }
+ }
+
+ action(x_decrementAcks, "x", desc="decrement Acks pending") {
+ tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
+ APPEND_TRANSITION_COMMENT(" Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
+ action(o_checkForCompletion, "o", desc="check for ack completion") {
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
+ action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
+ }
+ }
+
+ action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := in_msg.DataBlk;
+ entry.LastSender := in_msg.Sender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := in_msg.DataBlk;
+
+ entry.LastSender := in_msg.Sender;
+ }
+ }
+ }
+
+ action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
+ if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ }
+ }
+ }
+
+ action(sf_setForwardReqTime, "sf", desc="...") {
+ tbe.ForwardRequestTime := curCycle();
+ }
+
+ action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
+ L3CacheMemory.deallocate(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pm_popMemQueue, "pm", desc="pop mem queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
+ L3TriggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(zz_recycleRequestQueue, "zz", desc="recycle request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
+ stall_and_wait(requestNetwork_in, address);
+ }
+
+ action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
+ wakeUpBuffers(address);
+ }
+
+ action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
+ wakeUpAllBuffers();
+ }
+
+ action(z_stall, "z", desc="stall; leave the message in its queue") {
+ }
+
+ // TRANSITIONS
+ transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, CtoD}) {
+ st_stallAndWaitRequest;
+ }
+
+ // It may be possible to save multiple invalidations here!
+ transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Atomic, WriteThrough}) {
+ st_stallAndWaitRequest;
+ }
+
+
+ // transitions from U
+ transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} {
+ t_allocateTBE;
+ l_queueMemRdReq;
+ sc_probeShrCoreData;
+ p_popRequestQueue;
+ }
+
+ transition(U, WriteThrough, BM_PM) {L3TagArrayRead, L3TagArrayWrite} {
+ t_allocateTBE;
+ w_sendResponseWBAck;
+ l_queueMemRdReq;
+ dc_probeInvCoreData;
+ p_popRequestQueue;
+ }
+
+ transition(U, Atomic, BM_PM) {L3TagArrayRead, L3TagArrayWrite} {
+ t_allocateTBE;
+ l_queueMemRdReq;
+ dc_probeInvCoreData;
+ p_popRequestQueue;
+ }
+
+ transition(U, {RdBlkM}, BM_PM) {L3TagArrayRead} {
+ t_allocateTBE;
+ l_queueMemRdReq;
+ dc_probeInvCoreData;
+ p_popRequestQueue;
+ }
+
+ transition(U, RdBlk, B_PM) {L3TagArrayRead}{
+ t_allocateTBE;
+ l_queueMemRdReq;
+ sc_probeShrCoreData;
+ p_popRequestQueue;
+ }
+
+ transition(U, CtoD, BP) {L3TagArrayRead} {
+ t_allocateTBE;
+ ic_probeInvCore;
+ p_popRequestQueue;
+ }
+
+ transition(U, VicDirty, BL) {L3TagArrayRead} {
+ t_allocateTBE;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(U, VicClean, BL) {L3TagArrayRead} {
+ t_allocateTBE;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition(BL, {VicDirty, VicClean}) {
+ zz_recycleRequestQueue;
+ }
+
+ transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
+ d_writeDataToMemory;
+ al_allocateL3Block;
+ wa_wakeUpDependents;
+ dt_deallocateTBE;
+ pr_popResponseQueue;
+ }
+
+ transition(BL, StaleWB, U) {L3TagArrayWrite} {
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm}, {VicDirty, VicClean}) {
+ z_stall;
+ }
+
+ transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, WBAck) {
+ pm_popMemQueue;
+ }
+
+ transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, StaleVicDirty) {
+ rv_removeVicDirtyIgnore;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition({B}, CoreUnblock, U) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition(B, UnblockWriteThrough, U) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(BS_PM, MemData, BS_Pm) {} {
+ mt_writeMemDataToTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BM_PM, MemData, BM_Pm){} {
+ mt_writeMemDataToTBE;
+ pm_popMemQueue;
+ }
+
+ transition(B_PM, MemData, B_Pm){} {
+ mt_writeMemDataToTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BS_PM, L3Hit, BS_Pm) {} {
+ ptl_popTriggerQueue;
+ }
+
+ transition(BM_PM, L3Hit, BM_Pm) {} {
+ ptl_popTriggerQueue;
+ }
+
+ transition(B_PM, L3Hit, B_Pm) {} {
+ ptl_popTriggerQueue;
+ }
+
+ transition(BS_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(BM_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(B_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BP}, CPUPrbResp) {
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ o_checkForCompletion;
+ pr_popResponseQueue;
+ }
+
+ transition(BS_PM, ProbeAcksComplete, BS_M) {} {
+ sf_setForwardReqTime;
+ pt_popTriggerQueue;
+ }
+
+ transition(BM_PM, ProbeAcksComplete, BM_M) {} {
+ sf_setForwardReqTime;
+ pt_popTriggerQueue;
+ }
+
+ transition(B_PM, ProbeAcksComplete, B_M){} {
+ sf_setForwardReqTime;
+ pt_popTriggerQueue;
+ }
+
+ transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(BP, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ c_sendResponseCtoD;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+
+enumeration(CoherenceRequestType, desc="Coherence Request Types") {
+ // CPU Request Types ONLY
+ RdBlk, desc="Read Blk";
+ RdBlkM, desc="Read Blk Modified";
+ RdBlkS, desc="Read Blk Shared";
+ CtoD, desc="Change To Dirty";
+ VicClean, desc="L2 clean eviction";
+ VicDirty, desc="L2 dirty eviction";
+ Atomic, desc="Upper level atomic";
+ AtomicWriteBack, desc="Upper level atomic write back";
+ WriteThrough, desc="Ordered WriteThrough w/Data";
+ WriteThroughFifo, desc="WriteThrough with no data";
+ WriteThroughDummy, desc="WriteThrough with no data for atomic operation";
+ WriteFlush, desc="Release Flush";
+
+ WrCancel, desc="want to cancel WB to Memory"; // should this be here?
+
+ WBApproval, desc="WB Approval";
+
+ // Messages between Dir and R-Dir
+ ForceInv, desc="Send invalidate to the block";
+ ForceDowngrade, desc="Send downgrade to the block";
+ Unblock, desc="Used to let the dir know a message has been sunk";
+
+ // Messages between R-Dir and R-Buffer
+ PrivateNotify, desc="Let region buffer know it has private access";
+ SharedNotify, desc="Let region buffer know it has shared access";
+ WbNotify, desc="Let region buffer know it saw its wb request";
+ Downgrade, desc="Force the region buffer to downgrade to shared";
+ // Response to R-Dir (probably should be on a different network, but
+ // I need it to be ordered with respect to requests)
+ InvAck, desc="Let the R-Dir know when the inv has occurred";
+
+ PrivateRequest, desc="R-buf wants the region in private";
+ UpgradeRequest, desc="R-buf wants to upgrade the region to private";
+ SharedRequest, desc="R-buf wants the region in shared (could respond with private)";
+ CleanWbRequest, desc="R-buf wants to deallocate clean region";
+
+ NA, desc="So we don't get segfaults";
+}
+
+enumeration(ProbeRequestType, desc="Probe Request Types") {
+ PrbDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
+ PrbInv, desc="Probe to Invalidate";
+
+ // For regions
+ PrbRepl, desc="Force the cache to do a replacement";
+ PrbRegDowngrade, desc="Probe for Status"; // EtoS, MtoO, StoS
+ PrbAtomic, desc="Forwarded Atomic Operation";
+}
+
+
+enumeration(CoherenceResponseType, desc="Coherence Response Types") {
+ NBSysResp, desc="Northbridge response to CPU Rd request";
+ NBSysWBAck, desc="Northbridge response ok to WB";
+ TDSysResp, desc="TCCdirectory response to CPU Rd request";
+ TDSysWBAck, desc="TCCdirectory response ok to WB";
+ TDSysWBNack, desc="TCCdirectory response ok to drop";
+ CPUPrbResp, desc="CPU Probe Response";
+ CPUData, desc="CPU Data";
+ StaleNotif, desc="Notification of Stale WBAck, No data to writeback";
+ CPUCancelWB, desc="want to cancel WB to Memory";
+ MemData, desc="Data from Memory";
+
+ // for regions
+ PrivateAck, desc="Ack that r-buf received private notify";
+ RegionWbAck, desc="Writeback Ack that r-buf completed deallocation";
+ DirReadyAck, desc="Directory (mem ctrl)<->region dir handshake";
+}
+
+enumeration(CoherenceState, default="CoherenceState_NA", desc="Coherence State") {
+ Modified, desc="Modified";
+ Owned, desc="Owned state";
+ Exclusive, desc="Exclusive";
+ Shared, desc="Shared";
+ NA, desc="NA";
+}
+
+structure(CPURequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ Addr DemandAddress, desc="Physical block address for this request";
+ CoherenceRequestType Type, desc="Type of request";
+ DataBlock DataBlk, desc="data for the cache line"; // only for WB
+ bool Dirty, desc="whether WB data is dirty"; // only for WB
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ bool Shared, desc="For CPU_WrVicBlk, vic is O not M. For CPU_ClVicBlk, vic is S";
+ MessageSizeType MessageSize, desc="size category of the message";
+ Cycles InitialRequestTime, desc="time the initial request was sent from the L1Cache";
+ Cycles ForwardRequestTime, desc="time the dir forwarded the request";
+ Cycles ProbeRequestStartTime, desc="the time the dir started the probe request";
+ bool DemandRequest, default="false", desc="For profiling purposes";
+
+ NetDest Sharers, desc="Caches that may have a valid copy of the data";
+ bool ForceShared, desc="R-dir knows it is shared, pass on so it sends an S copy, not E";
+ bool Private, default="false", desc="Requestor already has private permissions, no need for dir check";
+ bool CtoDSinked, default="false", desc="This is true if the CtoD previously sent must have been sunk";
+
+ bool NoAckNeeded, default="false", desc="True if region buffer doesn't need to ack";
+ int Acks, default="0", desc="Acks that the dir (mem ctrl) should expect to receive";
+ CoherenceRequestType OriginalType, default="CoherenceRequestType_NA", desc="Type of request from core fwded through region buffer";
+ WriteMask writeMask, desc="Write Through Data";
+ MachineID WTRequestor, desc="Node who initiated the write through";
+ HSAScope scope, default="HSAScope_SYSTEM", desc="Request Scope";
+ int wfid, default="0", desc="wavefront id";
+ bool NoWriteConflict, default="true", desc="true if the write did not collide with a CAB entry";
+ int ProgramCounter, desc="PC that accesses this block";
+
+ bool functionalRead(Packet *pkt) {
+ // Only dirty writeback (VicDirty) messages contain the data block
+ if (Type == CoherenceRequestType:VicDirty) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+structure(NBProbeRequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ ProbeRequestType Type, desc="NB_PrbNxtState signal";
+ bool ReturnData, desc="Indicates CPU should return data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MessageSizeType MessageSize, desc="size category of the message";
+ bool DemandRequest, default="false", desc="demand request, requesting 3-hop transfer";
+ Addr DemandAddress, desc="Demand block address for a region request";
+ MachineID Requestor, desc="Requestor id for 3-hop requests";
+ bool NoAckNeeded, default="false", desc="For short-circuiting acks";
+ int ProgramCounter, desc="PC that accesses this block";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return false;
+ }
+
+}
+
+structure(TDProbeRequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ ProbeRequestType Type, desc="TD_PrbNxtState signal";
+ bool ReturnData, desc="Indicates CPU should return data";
+ bool localCtoD, desc="Indicates CtoD is within the GPU hierarchy (aka TCC subtree)";
+ NetDest Destination, desc="Node to whom the data is sent";
+ MessageSizeType MessageSize, desc="size category of the message";
+ int Phase, desc="Synchronization Phase";
+ int wfid, desc="wavefront id for Release";
+ MachineID Requestor, desc="Node who initiated the request";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+    // Probe requests carry no data block, so there is nothing
+    // to write.
+ return false;
+ }
+}
+
+// Response Messages seemed to be easily munged into one type
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="NB Sys Resp or CPU Response to Probe";
+ MachineID Sender, desc="Node who sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ // Begin Used Only By CPU Response
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Hit, desc="probe hit valid line";
+ bool Shared, desc="True if S, or if NB Probe ReturnData==1 && O";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+  bool Ntsl, desc="indicates probed line will be invalid after probe";
+ bool UntransferredOwner, desc="pending confirmation of ownership change";
+ // End Used Only By CPU Response
+
+ // Begin NB Response Only
+ CoherenceState State, default=CoherenceState_NA, desc="What returned data from NB should be in";
+ bool CtoD, desc="was the originator a CtoD?";
+ // End NB Response Only
+
+ // Normally if a block gets hit by a probe while waiting to be written back,
+ // you flip the NbReqShared signal (part of the CPURequest signal group).
+ // But since this is in packets and I don't want to send a separate packet,
+ // let's just send this signal back with the data instead
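+  // (Example: a core in O waiting to write back its line is hit by a
+  // shared probe; it returns the data with NbReqShared set so the
+  // requestor installs the line as Shared rather than Exclusive.)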
+ bool NbReqShared, desc="modification of Shared field from initial request, e.g. hit by shared probe";
+
+ MessageSizeType MessageSize, desc="size category of the message";
+  Cycles InitialRequestTime, desc="time the initial request was sent from the L1Cache";
+ Cycles ForwardRequestTime, desc="time the dir forwarded the request";
+ Cycles ProbeRequestStartTime, desc="the time the dir started the probe request";
+ bool DemandRequest, default="false", desc="For profiling purposes";
+
+ bool L3Hit, default="false", desc="Did memory or L3 supply the data?";
+ MachineID OriginalResponder, desc="Mach which wrote the data to the L3";
+ MachineID WTRequestor, desc="Node who started the writethrough";
+
+ bool NotCached, default="false", desc="True when the Region buffer has already evicted the line";
+
+  bool NoAckNeeded, default="false", desc="For short circuiting acks";
+ bool isValid, default="false", desc="Is acked block valid";
+ int wfid, default="0", desc="wavefront id";
+ int Phase, desc="Synchronization Phase";
+
+ int ProgramCounter, desc="PC that issues this request";
+ bool mispred, desc="tell TCP if the block should not be bypassed";
+
+
+ bool functionalRead(Packet *pkt) {
+    // Only CPUData and MemData messages contain the data block
+ if (Type == CoherenceResponseType:CPUData ||
+ Type == CoherenceResponseType:MemData) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check on message type required since the protocol should
+ // read data from those messages that contain the block
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+structure(UnblockMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ NetDest Destination, desc="Destination (always directory)";
+ MessageSizeType MessageSize, desc="size category of the message";
+ MachineID Sender, desc="Node who sent the data";
+ bool currentOwner, default="false", desc="Is the sender the current owner";
+ bool DoneAck, default="false", desc="Is this a done ack?";
+ bool Dirty, default="false", desc="Was block dirty when evicted";
+ bool wasValid, default="false", desc="Was block valid when evicted";
+ bool valid, default="false", desc="Is block valid";
+  bool validToInvalid, default="false", desc="Did the block go from valid to invalid on this eviction";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+    // Unblock messages carry no data block, so there is nothing
+    // to write.
+ return false;
+ }
+}
+
+enumeration(TriggerType, desc="Trigger Type") {
+ L2_to_L1, desc="L2 to L1 fill";
+ AcksComplete, desc="NB received all needed Acks";
+
+ // For regions
+ InvNext, desc="Invalidate the next block";
+ PrivateAck, desc="Loopback ack for machines with no Region Buffer";
+ AllOutstanding, desc="All outstanding requests have finished";
+ L3Hit, desc="L3 hit in dir";
+
+ // For region directory once the directory is blocked
+ InvRegion, desc="Invalidate region";
+ DowngradeRegion, desc="downgrade region";
+ //For writethrough
+ UnblockWriteThrough, desc="unblock";
+ WriteData, desc="Write to full cacheblock data";
+ WriteDone, desc="Sequencer says that write is done";
+ AtomicDone, desc="Atomic is done";
+}
+
+enumeration(CacheId, desc="Which Cache in the Core") {
+ L1I, desc="L1 I-cache";
+ L1D0, desc="L1 D-cache cluster 0";
+ L1D1, desc="L1 D-cache cluster 1";
+ NA, desc="Default";
+}
+
+structure(TriggerMsg, desc="...", interface="Message") {
+ Addr addr, desc="Address";
+ TriggerType Type, desc="Type of trigger";
+ CacheId Dest, default="CacheId_NA", desc="Cache to invalidate";
+  int ProgramCounter, desc="PC that accesses this block";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+    // Trigger messages carry no data block, so there is nothing
+    // to write.
+ return false;
+ }
+
+}
+
+enumeration(FifoType, desc="Fifo Type") {
+ WriteDummy, desc="Dummy Write for atomic operation";
+ WriteThrough, desc="simple writethrough request";
+ WriteFlush, desc="synchronization message";
+}
+
+structure(FifoMsg, desc="...", interface="Message") {
+ Addr addr, desc="Address";
+ FifoType Type, desc="WriteThrough/WriteFlush";
+ int wfid, default="0",desc="wavefront id";
+ MachineID Requestor, desc="Flush Requestor";
+ MachineID oRequestor, desc="original Flush Requestor";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+    // Fifo messages carry no data block, so there is nothing
+    // to write.
+ return false;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * For use for simulation and test purposes only
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu,
+ * Sooraj Puthoor
+ */
+
+/*
+ * This file is based on MOESI_AMD_Base.sm
+ * Differences from the AMD base protocol:
+ * -- Uses a probe filter memory to track sharers.
+ * -- The probe filter can be inclusive or non-inclusive.
+ * -- Only two sharers are tracked: a) the GPU and/or b) the CPU.
+ * -- If sharer information is available, the sharer is probed.
+ * -- If sharer information is not available, probes are broadcast.
+ */
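+
+// A rough sketch of the probe-targeting policy this implies (see the
+// probe actions and the isCPUSharer/isGPUSharer helpers below):
+//
+//   if (isCPUSharer(addr))  probe the CorePair (CPU) complex
+//   if (isGPUSharer(addr))  probe the TCC/TCCdir (GPU) complex
+//
+// A ProbeFilterState:NT ("not tracked") entry reports both sides as
+// sharers, so an untracked line falls back to a broadcast probe.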
+
+machine(MachineType:Directory, "AMD Baseline protocol")
+: DirectoryMemory * directory;
+ CacheMemory * L3CacheMemory;
+ CacheMemory * ProbeFilterMemory;
+ Cycles response_latency := 5;
+ Cycles l3_hit_latency := 50;
+ bool noTCCdir := "False";
+ bool CAB_TCC := "False";
+ int TCC_select_num_bits:=1;
+ bool useL3OnWT := "False";
+ bool inclusiveDir := "True";
+ Cycles to_memory_controller_latency := 1;
+
+ // From the Cores
+ MessageBuffer * requestFromCores, network="From", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseFromCores, network="From", virtual_network="2", ordered="false", vnet_type="response";
+ MessageBuffer * unblockFromCores, network="From", virtual_network="4", ordered="false", vnet_type="unblock";
+
+ MessageBuffer * probeToCore, network="To", virtual_network="0", ordered="false", vnet_type="request";
+ MessageBuffer * responseToCore, network="To", virtual_network="2", ordered="false", vnet_type="response";
+
+ MessageBuffer * triggerQueue, ordered="true";
+ MessageBuffer * L3triggerQueue, ordered="true";
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_U") {
+ U, AccessPermission:Backing_Store, desc="unblocked";
+ BL, AccessPermission:Busy, desc="got L3 WB request";
+ // BL is Busy because it is busy waiting for the data
+ // which is possibly in the network. The cache which evicted the data
+ // might have moved to some other state after doing the eviction
+ // BS==> Received a read request; has not requested ownership
+ // B==> Received a read request; has requested ownership
+ // BM==> Received a modification request
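+    // Suffix convention for the blocked states below:
+    //   *_PM ==> waiting for probes and memory
+    //   *_Pm ==> waiting for probes; memory data already received
+    //   *_M  ==> waiting for memory only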
+ B_P, AccessPermission:Backing_Store, desc="Back invalidation, waiting for probes";
+ BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory";
+ BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory";
+ BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
+ BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
+ B, AccessPermission:Backing_Store, desc="sent response, Blocked til ack";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ // CPU requests
+ RdBlkS, desc="...";
+ RdBlkM, desc="...";
+ RdBlk, desc="...";
+ CtoD, desc="...";
+ WriteThrough, desc="WriteThrough Message";
+ Atomic, desc="Atomic Message";
+
+ // writebacks
+ VicDirty, desc="...";
+ VicClean, desc="...";
+ CPUData, desc="WB data from CPU";
+    StaleWB, desc="Notification that WB has been superseded by a probe";
+
+ // probe responses
+ CPUPrbResp, desc="Probe Response Msg";
+
+ ProbeAcksComplete, desc="Probe Acks Complete";
+
+ L3Hit, desc="Hit in L3 return data to core";
+
+ // Replacement
+ PF_Repl, desc="Replace address from probe filter";
+
+ // Memory Controller
+ MemData, desc="Fetched data from memory arrives";
+ WBAck, desc="Writeback Ack from memory arrives";
+
+ CoreUnblock, desc="Core received data, unblock";
+ UnblockWriteThrough, desc="Unblock because of writethrough request finishing";
+
+ StaleVicDirty, desc="Core invalidated before VicDirty processed";
+ }
+
+ enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
+ L3DataArrayRead, desc="Read the data array";
+ L3DataArrayWrite, desc="Write the data array";
+ L3TagArrayRead, desc="Read the data array";
+ L3TagArrayWrite, desc="Write the data array";
+
+    PFTagArrayRead, desc="Read the probe filter tag array";
+    PFTagArrayWrite, desc="Write the probe filter tag array";
+ }
+
+ // TYPES
+
+ enumeration(ProbeFilterState, desc="") {
+ T, desc="Tracked";
+ NT, desc="Not tracked";
+ B, desc="Blocked, This entry is being replaced";
+ }
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ DataBlock DataBlk, desc="data for the block";
+ NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore";
+ }
+
+ structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
+ DataBlock DataBlk, desc="data for the block";
+ MachineID LastSender, desc="Mach which this block came from";
+ ProbeFilterState pfState, desc="ProbeFilter state",default="Directory_ProbeFilterState_NT";
+ bool isOnCPU, desc="Block valid in the CPU complex",default="false";
+ bool isOnGPU, desc="Block valid in the GPU complex",default="false";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block";
+ bool Dirty, desc="Is the data dirty?";
+ int NumPendingAcks, desc="num acks expected";
+ MachineID OriginalRequestor, desc="Original Requestor";
+ MachineID WTRequestor, desc="WT Requestor";
+ bool Cached, desc="data hit in Cache";
+ bool MemData, desc="Got MemData?",default="false";
+ bool wtData, desc="Got write through data?",default="false";
+ bool atomicData, desc="Got Atomic op?",default="false";
+ Cycles InitialRequestTime, desc="...";
+ Cycles ForwardRequestTime, desc="...";
+ Cycles ProbeRequestStartTime, desc="...";
+ MachineID LastSender, desc="Mach which this block came from";
+ bool L3Hit, default="false", desc="Was this an L3 hit?";
+ uint64_t probe_id, desc="probe id for lifetime profiling";
+ WriteMask writeMask, desc="outstanding write through mask";
+ Addr demandAddress, desc="Address of demand request which caused probe filter eviction";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+
+ if (is_valid(dir_entry)) {
+ //DPRINTF(RubySlicc, "Getting entry %s: %s\n", addr, dir_entry.DataBlk);
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
+ TBE tbe := TBEs.lookup(addr);
+ if (is_valid(tbe) && tbe.MemData) {
+ DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);
+ return tbe.DataBlk;
+ }
+ DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
+ return getDirectoryEntry(addr).DataBlk;
+ }
+
+ State getState(TBE tbe, CacheEntry entry, Addr addr) {
+ CacheEntry probeFilterEntry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(addr));
+ if (inclusiveDir) {
+ if (is_valid(probeFilterEntry) && probeFilterEntry.pfState == ProbeFilterState:B) {
+ return State:B_P;
+ }
+ }
+ return getDirectoryEntry(addr).DirectoryState;
+ }
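+
+  // Note: while a probe filter entry is being replaced
+  // (ProbeFilterState:B), an inclusive directory reports the line as
+  // B_P, so incoming requests stall until the back probe completes (see
+  // the B_P transitions below).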
+
+ void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
+ getDirectoryEntry(addr).DirectoryState := state;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs.lookup(addr);
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes +
+ functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+    // For this directory, all permissions are tracked in the directory:
+    // since it's not possible to have something in the TBE but not in
+    // the directory, state is kept in one place.
+ if (directory.isPresent(addr)) {
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(CacheEntry entry, Addr addr, State state) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+
+ void recordRequestType(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L3DataArrayRead) {
+ L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
+ } else if (request_type == RequestType:L3DataArrayWrite) {
+ L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
+ } else if (request_type == RequestType:L3TagArrayRead) {
+ L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:L3TagArrayWrite) {
+ L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ } else if (request_type == RequestType:PFTagArrayRead) {
+ ProbeFilterMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
+ } else if (request_type == RequestType:PFTagArrayWrite) {
+ ProbeFilterMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
+ }
+ }
+
+ bool checkResourceAvailable(RequestType request_type, Addr addr) {
+ if (request_type == RequestType:L3DataArrayRead) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L3DataArrayWrite) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
+ } else if (request_type == RequestType:L3TagArrayRead) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:L3TagArrayWrite) {
+ return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:PFTagArrayRead) {
+ return ProbeFilterMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else if (request_type == RequestType:PFTagArrayWrite) {
+ return ProbeFilterMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
+ } else {
+ error("Invalid RequestType type in checkResourceAvailable");
+ return true;
+ }
+ }
+
+ bool isNotPresentProbeFilter(Addr address) {
+ if (ProbeFilterMemory.isTagPresent(address) ||
+ ProbeFilterMemory.cacheAvail(address)) {
+ return false;
+ }
+ return true;
+ }
+
+ bool isGPUSharer(Addr address) {
+ assert(ProbeFilterMemory.isTagPresent(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
+ if (entry.pfState == ProbeFilterState:NT) {
+ return true;
+    } else if (entry.isOnGPU) {
+ return true;
+ }
+ return false;
+ }
+
+ bool isCPUSharer(Addr address) {
+ assert(ProbeFilterMemory.isTagPresent(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
+ if (entry.pfState == ProbeFilterState:NT) {
+ return true;
+    } else if (entry.isOnCPU) {
+ return true;
+ }
+ return false;
+ }
+
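+  // A minimal sketch of how the probe actions below use these helpers
+  // when building a probe's destination set:
+  //
+  //   if (isCPUSharer(address)) {
+  //     out_msg.Destination.broadcast(MachineType:CorePair);
+  //   }
+  //   if (isGPUSharer(address)) {
+  //     out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
+  //   }
+  //
+  // Untracked (NT) entries return true from both helpers, so the probe
+  // degenerates to probing both complexes.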
+
+ // ** OUT_PORTS **
+ out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
+ out_port(responseNetwork_out, ResponseMsg, responseToCore);
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+ out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
+
+ // ** IN_PORTS **
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == TriggerType:AcksComplete) {
+ trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
+        } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
+ trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);
+ } else {
+ error("Unknown trigger msg");
+ }
+ }
+ }
+ }
+
+ in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=4) {
+ if (L3TriggerQueue_in.isReady(clockEdge())) {
+ peek(L3TriggerQueue_in, TriggerMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == TriggerType:L3Hit) {
+ trigger(Event:L3Hit, in_msg.addr, entry, tbe);
+ } else {
+ error("Unknown trigger msg");
+ }
+ }
+ }
+ }
+
+ // Unblock Network
+ in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=3) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, UnblockMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
+ }
+ }
+ }
+
+ // Core response network
+ in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=2) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
+ trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:CPUData) {
+ trigger(Event:CPUData, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
+ trigger(Event:StaleWB, in_msg.addr, entry, tbe);
+ } else {
+ error("Unexpected response type");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=1) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:MemData, in_msg.addr, entry, tbe);
+ DPRINTF(RubySlicc, "%s\n", in_msg);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBE tbe := TBEs.lookup(in_msg.addr);
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
+ if (inclusiveDir && isNotPresentProbeFilter(in_msg.addr)) {
+ Addr victim := ProbeFilterMemory.cacheProbe(in_msg.addr);
+ tbe := TBEs.lookup(victim);
+ entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(victim));
+ trigger(Event:PF_Repl, victim, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
+ trigger(Event:RdBlk, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
+ trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
+ trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:Atomic) {
+ trigger(Event:Atomic, in_msg.addr, entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
+ trigger(Event:VicDirty, in_msg.addr, entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:VicClean) {
+ if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
+ DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
+ trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
+ trigger(Event:VicClean, in_msg.addr, entry, tbe);
+ }
+ } else {
+ error("Bad request message type");
+ }
+ }
+ }
+ }
+
+ // Actions
+ action(s_sendResponseS, "s", desc="send Shared response") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Shared;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Cached) {
+ out_msg.State := CoherenceState:Shared;
+ } else {
+ out_msg.State := CoherenceState:Exclusive;
+ }
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+  // Write-throughs and atomics do not send an unblock ack back to the
+  // directory, so the directory has to generate a self-unblocking
+  // message. Additionally, a write-through does not require data in its
+  // response, so write-throughs are treated separately from write-backs
+  // and atomics.
+ action(m_sendResponseM, "m", desc="send Modified response") {
+ if (tbe.wtData) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+    } else {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ if (tbe.L3Hit) {
+ out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));
+ } else {
+ out_msg.Sender := machineID;
+ }
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := false;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ out_msg.OriginalResponder := tbe.LastSender;
+        if (tbe.atomicData) {
+ out_msg.WTRequestor := tbe.WTRequestor;
+ }
+ out_msg.L3Hit := tbe.L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ if (tbe.atomicData) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:UnblockWriteThrough;
+ }
+ }
+ }
+ }
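+
+  // The self-unblock above flows: TriggerType:UnblockWriteThrough ->
+  // Event:UnblockWriteThrough -> transition(B, UnblockWriteThrough, U);
+  // the directory unblocks itself rather than waiting for a CoreUnblock
+  // that write-throughs and atomics never send.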
+
+ action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysResp;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(tbe.OriginalRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.Dirty := false;
+ out_msg.State := CoherenceState:Modified;
+ out_msg.CtoD := true;
+ out_msg.InitialRequestTime := tbe.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ }
+
+ action(w_sendResponseWBAck, "w", desc="send WB Ack") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:NBSysWBAck;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.WTRequestor := in_msg.WTRequestor;
+ out_msg.Sender := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(l_queueMemWBReq, "lq", desc="Write WB data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+ action(l_queueMemRdReq, "lr", desc="Read data from memory") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L3Hit;
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ }
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ tbe.DataBlk := entry.DataBlk;
+ tbe.LastSender := entry.LastSender;
+ tbe.L3Hit := true;
+ tbe.MemData := true;
+ L3CacheMemory.deallocate(address);
+ } else {
+ queueMemoryRead(machineID, address, to_memory_controller_latency);
+ }
+ }
+ }
+
+ action(dc_probeInvCoreData, "dc", desc="probe inv cores, return data") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ if(isCPUSharer(address)) {
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+ }
+
+ // add relevant TCC node to list. This replaces all TCPs and SQCs
+ if(isGPUSharer(address)) {
+ if ((in_msg.Type == CoherenceRequestType:WriteThrough ||
+ in_msg.Type == CoherenceRequestType:Atomic) &&
+ in_msg.NoWriteConflict) {
+            // Don't include TCCs unless there was a write-CAB conflict in the TCC
+ } else if(noTCCdir) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ } else {
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
+ }
+ }
+ out_msg.Destination.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ APPEND_TRANSITION_COMMENT(" dc: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(bp_backProbe, "bp", desc="back probe") {
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ if(isCPUSharer(address)) {
+ // won't be realistic for multisocket
+ out_msg.Destination.broadcast(MachineType:CorePair);
+ }
+ // add relevant TCC node to the list. This replaces all TCPs and SQCs
+ if(isGPUSharer(address)) {
+ if (noTCCdir) {
+ //Don't need to notify TCC about reads
+ } else {
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ }
+ if (noTCCdir && CAB_TCC) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ }
+ }
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ APPEND_TRANSITION_COMMENT(" - back probe");
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+
+ action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
+ peek(requestNetwork_in, CPURequestMsg) { // not the right network?
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbDowngrade;
+ out_msg.ReturnData := true;
+ out_msg.MessageSize := MessageSizeType:Control;
+ if(isCPUSharer(address)) {
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+ }
+ // add relevant TCC node to the list. This replaces all TCPs and SQCs
+ if(isGPUSharer(address)) {
+ if (noTCCdir) {
+ //Don't need to notify TCC about reads
+ } else {
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
+ tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
+ }
+ if (noTCCdir && CAB_TCC) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ }
+ }
+ out_msg.Destination.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ DPRINTF(RubySlicc, "%s\n", (out_msg));
+ APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
+ peek(requestNetwork_in, CPURequestMsg) { // not the right network?
+ enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := ProbeRequestType:PrbInv;
+ out_msg.ReturnData := false;
+ out_msg.MessageSize := MessageSizeType:Control;
+ if(isCPUSharer(address)) {
+ out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
+ }
+
+ // add relevant TCC node to the list. This replaces all TCPs and SQCs
+ if(isGPUSharer(address)) {
+ if (noTCCdir) {
+ out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
+ TCC_select_low_bit, TCC_select_num_bits));
+ } else {
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:TCCdir));
+ }
+ }
+ out_msg.Destination.remove(in_msg.Requestor);
+ tbe.NumPendingAcks := out_msg.Destination.count();
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ DPRINTF(RubySlicc, "%s\n", out_msg);
+ tbe.ProbeRequestStartTime := curCycle();
+ }
+ }
+ }
+
+ action(sm_setMRU, "sm", desc="set probe filter entry as MRU") {
+ ProbeFilterMemory.setMRU(address);
+ }
+
+ action(d_writeDataToMemory, "d", desc="Write data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Writing Data: %s to address %s\n", in_msg.DataBlk,
+ in_msg.addr);
+ }
+ }
+
+  action(te_allocateTBEForEviction, "te", desc="allocate TBE entry for PF eviction") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ tbe.writeMask.clear();
+ tbe.wtData := false;
+ tbe.atomicData := false;
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ tbe.NumPendingAcks := 0;
+ }
+
+ action(t_allocateTBE, "t", desc="allocate TBE Entry") {
+ check_allocate(TBEs);
+ peek(requestNetwork_in, CPURequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs.lookup(address));
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.wtData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ if (in_msg.Type == CoherenceRequestType:Atomic) {
+ tbe.writeMask.clear();
+ tbe.writeMask.orMask(in_msg.writeMask);
+ tbe.atomicData := true;
+ tbe.WTRequestor := in_msg.WTRequestor;
+ tbe.LastSender := in_msg.Requestor;
+ }
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
+ tbe.Dirty := false;
+ if (in_msg.Type == CoherenceRequestType:WriteThrough) {
+ tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
+ tbe.Dirty := false;
+ }
+ tbe.OriginalRequestor := in_msg.Requestor;
+ tbe.NumPendingAcks := 0;
+ tbe.Cached := in_msg.ForceShared;
+ tbe.InitialRequestTime := in_msg.InitialRequestTime;
+ }
+ }
+
+ action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
+ if (tbe.Dirty == false) {
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ }
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(wd_writeBackData, "wd", desc="Write back data if needed") {
+ if (tbe.wtData) {
+ DataBlock tmp := getDirectoryEntry(address).DataBlk;
+ tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
+ tbe.DataBlk := tmp;
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ } else if (tbe.atomicData) {
+ tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,
+ tbe.writeMask);
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ } else if (tbe.Dirty == false) {
+ getDirectoryEntry(address).DataBlk := tbe.DataBlk;
+ }
+ }
+
+ action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
+ peek(memQueue_in, MemoryMsg) {
+ if (tbe.wtData == true) {
+ // DO Nothing (already have the directory data)
+ } else if (tbe.Dirty == false) {
+ tbe.DataBlk := getDirectoryEntry(address).DataBlk;
+ }
+ tbe.MemData := true;
+ }
+ }
+
+ action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Dirty) {
+ DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
+ DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
+ if (tbe.wtData) {
+ DataBlock tmp := in_msg.DataBlk;
+ tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
+ tbe.DataBlk := tmp;
+ } else if (tbe.Dirty) {
+ if(tbe.atomicData == false && tbe.wtData == false) {
+ DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
+ assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
+ }
+ } else {
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ tbe.LastSender := in_msg.Sender;
+ }
+ }
+ if (in_msg.Hit) {
+ tbe.Cached := true;
+ }
+ }
+ }
+
+ action(mwc_markSinkWriteCancel, "mwc", desc="Mark to sink impending VicDirty") {
+ peek(responseNetwork_in, ResponseMsg) {
+ DPRINTF(RubySlicc, "Write cancel bit set on address %s\n", address);
+ getDirectoryEntry(address).VicDirtyIgnore.add(in_msg.Sender);
+ APPEND_TRANSITION_COMMENT(" setting bit to sink VicDirty ");
+ }
+ }
+
+ action(x_decrementAcks, "x", desc="decrement Acks pending") {
+ tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
+ APPEND_TRANSITION_COMMENT(" Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
+ action(o_checkForCompletion, "o", desc="check for ack completion") {
+ if (tbe.NumPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:AcksComplete;
+ }
+ }
+ APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
+ }
+
+ action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
+ }
+ }
+
+ action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := in_msg.DataBlk;
+ entry.LastSender := in_msg.Sender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := in_msg.DataBlk;
+
+ entry.LastSender := in_msg.Sender;
+ }
+ }
+ }
+
+ action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
+ if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
+ if (L3CacheMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ } else {
+ if (L3CacheMemory.cacheAvail(address) == false) {
+ Addr victim := L3CacheMemory.cacheProbe(address);
+ CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
+ L3CacheMemory.lookup(victim));
+ queueMemoryWrite(machineID, victim, to_memory_controller_latency,
+ victim_entry.DataBlk);
+ L3CacheMemory.deallocate(victim);
+ }
+ assert(L3CacheMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
+ entry.DataBlk := tbe.DataBlk;
+ entry.LastSender := tbe.LastSender;
+ }
+ }
+ }
+
+  action(apf_allocateProbeFilterEntry, "apf", desc="Allocate probe filter entry") {
+ if (!ProbeFilterMemory.isTagPresent(address)) {
+ if (inclusiveDir) {
+ assert(ProbeFilterMemory.cacheAvail(address));
+ } else if (ProbeFilterMemory.cacheAvail(address) == false) {
+ Addr victim := ProbeFilterMemory.cacheProbe(address);
+ ProbeFilterMemory.deallocate(victim);
+ }
+ assert(ProbeFilterMemory.cacheAvail(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.allocate(address, new CacheEntry));
+ APPEND_TRANSITION_COMMENT(" allocating a new probe filter entry");
+ entry.pfState := ProbeFilterState:NT;
+ if (inclusiveDir) {
+ entry.pfState := ProbeFilterState:T;
+ }
+ entry.isOnCPU := false;
+ entry.isOnGPU := false;
+ }
+ }
+
+ action(mpfe_markPFEntryForEviction, "mpfe", desc="Mark this PF entry is being evicted") {
+ assert(ProbeFilterMemory.isTagPresent(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
+ entry.pfState := ProbeFilterState:B;
+ peek(requestNetwork_in, CPURequestMsg) {
+ tbe.demandAddress := in_msg.addr;
+ }
+ }
+
+ action(we_wakeUpEvictionDependents, "we", desc="Wake up requests waiting for demand address and victim address") {
+ wakeUpBuffers(address);
+ wakeUpBuffers(tbe.demandAddress);
+ }
+
+ action(dpf_deallocateProbeFilter, "dpf", desc="deallocate PF entry") {
+ assert(ProbeFilterMemory.isTagPresent(address));
+ ProbeFilterMemory.deallocate(address);
+ }
+
+ action(upf_updateProbeFilter, "upf", desc="") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ assert(ProbeFilterMemory.isTagPresent(address));
+ CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
+      if (in_msg.Type == CoherenceRequestType:WriteThrough ||
+          in_msg.Type == CoherenceRequestType:Atomic ||
+          in_msg.Type == CoherenceRequestType:RdBlkM ||
+          in_msg.Type == CoherenceRequestType:CtoD) {
+        entry.pfState := ProbeFilterState:T;
+        entry.isOnCPU := false;
+        entry.isOnGPU := false;
+      }
+      if (machineIDToMachineType(in_msg.Requestor) == MachineType:CorePair) {
+ entry.isOnCPU := true;
+ } else {
+ entry.isOnGPU := true;
+ }
+ }
+ }
+
+ action(rmcd_removeSharerConditional, "rmcd", desc="remove sharer from probe Filter, conditional") {
+ peek(requestNetwork_in, CPURequestMsg) {
+ if (ProbeFilterMemory.isTagPresent(address)) {
+ CacheEntry entry := static_cast(CacheEntry, "pointer", ProbeFilterMemory.lookup(address));
+        // CorePair has an inclusive L2, so a victimized line leaves the
+        // whole CPU complex.
+        if (machineIDToMachineType(in_msg.Requestor) == MachineType:CorePair) {
+          if (in_msg.Type == CoherenceRequestType:VicDirty ||
+              in_msg.Type == CoherenceRequestType:VicClean) {
+            entry.isOnCPU := false;
+          }
+        }
+ }
+ }
+ }
+
+ action(sf_setForwardReqTime, "sf", desc="...") {
+ tbe.ForwardRequestTime := curCycle();
+ }
+
+ action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
+ L3CacheMemory.deallocate(address);
+ }
+
+ action(p_popRequestQueue, "p", desc="pop request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pr_popResponseQueue, "pr", desc="pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(pm_popMemQueue, "pm", desc="pop mem queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
+ L3TriggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(zz_recycleRequestQueue, "zz", desc="recycle request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
+ stall_and_wait(requestNetwork_in, address);
+ }
+
+ action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
+ wakeUpBuffers(address);
+ }
+
+ action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
+ wakeUpAllBuffers();
+ }
+
+ action(z_stall, "z", desc="...") {
+ }
+
+ // TRANSITIONS
+ transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, {RdBlkS, RdBlkM, RdBlk, CtoD}) {
+ st_stallAndWaitRequest;
+ }
+
+ // It may be possible to save multiple invalidations here!
+ transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, {Atomic, WriteThrough}) {
+ st_stallAndWaitRequest;
+ }
+
+
+ // transitions from U
+  transition(U, PF_Repl, B_P) {PFTagArrayRead, PFTagArrayWrite} {
+ te_allocateTBEForEviction;
+ apf_allocateProbeFilterEntry;
+ bp_backProbe;
+ sm_setMRU;
+ mpfe_markPFEntryForEviction;
+ }
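+
+  // Probe filter eviction flow, roughly: PF_Repl back-probes the victim
+  // line out of all sharers (bp_backProbe) and records the demand address
+  // in the TBE (mpfe_markPFEntryForEviction); when all probe acks arrive,
+  // transition(B_P, ProbeAcksComplete, U) frees the PF entry and wakes
+  // both the victim and the stalled demand request
+  // (we_wakeUpEvictionDependents).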
+
+ transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
+ t_allocateTBE;
+ apf_allocateProbeFilterEntry;
+ l_queueMemRdReq;
+ sc_probeShrCoreData;
+ sm_setMRU;
+ upf_updateProbeFilter;
+ p_popRequestQueue;
+ }
+
+ transition(U, WriteThrough, BM_PM) {L3TagArrayRead, L3TagArrayWrite, PFTagArrayRead, PFTagArrayWrite} {
+ t_allocateTBE;
+ apf_allocateProbeFilterEntry;
+ w_sendResponseWBAck;
+ l_queueMemRdReq;
+ dc_probeInvCoreData;
+ sm_setMRU;
+ upf_updateProbeFilter;
+ p_popRequestQueue;
+ }
+
+ transition(U, Atomic, BM_PM) {L3TagArrayRead, L3TagArrayWrite, PFTagArrayRead, PFTagArrayWrite} {
+ t_allocateTBE;
+ apf_allocateProbeFilterEntry;
+ l_queueMemRdReq;
+ dc_probeInvCoreData;
+ sm_setMRU;
+ upf_updateProbeFilter;
+ p_popRequestQueue;
+ }
+
+ transition(U, {RdBlkM}, BM_PM) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
+ t_allocateTBE;
+ apf_allocateProbeFilterEntry;
+ l_queueMemRdReq;
+ dc_probeInvCoreData;
+ sm_setMRU;
+ upf_updateProbeFilter;
+ p_popRequestQueue;
+ }
+
+  transition(U, RdBlk, B_PM) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
+ t_allocateTBE;
+ apf_allocateProbeFilterEntry;
+ l_queueMemRdReq;
+ sc_probeShrCoreData;
+ sm_setMRU;
+ upf_updateProbeFilter;
+ p_popRequestQueue;
+ }
+
+ transition(U, CtoD, BP) {L3TagArrayRead, PFTagArrayRead, PFTagArrayWrite} {
+ t_allocateTBE;
+ apf_allocateProbeFilterEntry;
+ ic_probeInvCore;
+ sm_setMRU;
+ upf_updateProbeFilter;
+ p_popRequestQueue;
+ }
+
+ transition(U, VicDirty, BL) {L3TagArrayRead} {
+ t_allocateTBE;
+ w_sendResponseWBAck;
+ rmcd_removeSharerConditional;
+ p_popRequestQueue;
+ }
+
+ transition(U, VicClean, BL) {L3TagArrayRead} {
+ t_allocateTBE;
+ w_sendResponseWBAck;
+ rmcd_removeSharerConditional;
+ p_popRequestQueue;
+ }
+
+ transition(BL, {VicDirty, VicClean}) {
+ zz_recycleRequestQueue;
+ }
+
+ transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
+ d_writeDataToMemory;
+ al_allocateL3Block;
+ wa_wakeUpDependents;
+ dt_deallocateTBE;
+ //l_queueMemWBReq; // why need an ack? esp. with DRAMSim, just put it in queue no ack needed
+ pr_popResponseQueue;
+ }
+
+ transition(BL, StaleWB, U) {L3TagArrayWrite} {
+ dt_deallocateTBE;
+ wa_wakeUpAllDependents;
+ pr_popResponseQueue;
+ }
+
+ transition({B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P}, {VicDirty, VicClean}) {
+ z_stall;
+ }
+
+ transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, WBAck) {
+ pm_popMemQueue;
+ }
+
+ transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, PF_Repl) {
+ zz_recycleRequestQueue;
+ }
+
+ transition({U, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, B}, StaleVicDirty) {
+ rv_removeVicDirtyIgnore;
+ w_sendResponseWBAck;
+ p_popRequestQueue;
+ }
+
+ transition({B}, CoreUnblock, U) {
+ wa_wakeUpDependents;
+ pu_popUnblockQueue;
+ }
+
+ transition(B, UnblockWriteThrough, U) {
+ wa_wakeUpDependents;
+ pt_popTriggerQueue;
+ }
+
+ transition(BS_PM, MemData, BS_Pm) {} {
+ mt_writeMemDataToTBE;
+ pm_popMemQueue;
+ }
+
+  transition(BM_PM, MemData, BM_Pm) {} {
+ mt_writeMemDataToTBE;
+ pm_popMemQueue;
+ }
+
+  transition(B_PM, MemData, B_Pm) {} {
+ mt_writeMemDataToTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BS_PM, L3Hit, BS_Pm) {} {
+ ptl_popTriggerQueue;
+ }
+
+ transition(BM_PM, L3Hit, BM_Pm) {} {
+ ptl_popTriggerQueue;
+ }
+
+ transition(B_PM, L3Hit, B_Pm) {} {
+ ptl_popTriggerQueue;
+ }
+
+  transition(BS_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+  transition(BM_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+  transition(B_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ mt_writeMemDataToTBE;
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pm_popMemQueue;
+ }
+
+ transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(BM_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition(B_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ ptl_popTriggerQueue;
+ }
+
+ transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B_P, BP}, CPUPrbResp) {
+ y_writeProbeDataToTBE;
+ x_decrementAcks;
+ o_checkForCompletion;
+ pr_popResponseQueue;
+ }
+
+ transition(BS_PM, ProbeAcksComplete, BS_M) {} {
+ sf_setForwardReqTime;
+ pt_popTriggerQueue;
+ }
+
+ transition(BM_PM, ProbeAcksComplete, BM_M) {} {
+ sf_setForwardReqTime;
+ pt_popTriggerQueue;
+ }
+
+  transition(B_PM, ProbeAcksComplete, B_M) {} {
+ sf_setForwardReqTime;
+ pt_popTriggerQueue;
+ }
+
+  transition(BS_Pm, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ s_sendResponseS;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+  transition(BM_Pm, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ m_sendResponseM;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+  transition(B_Pm, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ es_sendResponseES;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+ transition(B_P, ProbeAcksComplete, U) {
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ we_wakeUpEvictionDependents;
+ dpf_deallocateProbeFilter;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+
+  transition(BP, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} {
+ sf_setForwardReqTime;
+ c_sendResponseCtoD;
+ wd_writeBackData;
+ alwt_allocateL3BlockOnWT;
+ dt_deallocateTBE;
+ pt_popTriggerQueue;
+ }
+}
--- /dev/null
+protocol "MOESI_AMD_Base";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_AMD_Base-msg.sm";
+include "MOESI_AMD_Base-CorePair.sm";
+include "MOESI_AMD_Base-L3cache.sm";
+include "MOESI_AMD_Base-dir.sm";
--- /dev/null
+/*
+ * Copyright (c) 2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L1Cache, "L1 cache protocol")
+ : Sequencer * sequencer;
+ CacheMemory * L1Icache;
+ CacheMemory * L1Dcache;
+ Cycles request_latency := 1;
+ Cycles response_latency := 1;
+ Cycles use_timeout_latency := 50;
+ bool send_evictions;
+
+ // Message Queues
+ // From this node's L1 cache TO the network
+ // this L1 -> its L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
+ vnet_type="request";
+ // this L1 -> its L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
+ vnet_type="response";
+
+ // To this node's L1 cache FROM the network
+ // an L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
+ vnet_type="request";
+ // an L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
+ vnet_type="response";
+
+ MessageBuffer * triggerQueue;
+
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ I, AccessPermission:Invalid, desc="Idle";
+ S, AccessPermission:Read_Only, desc="Shared";
+ O, AccessPermission:Read_Only, desc="Owned";
+ M, AccessPermission:Read_Only, desc="Modified (dirty)";
+ M_W, AccessPermission:Read_Only, desc="Modified (dirty), in use-timeout window";
+ MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
+ MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified), in use-timeout window";
+
+ // Transient States
+ IM, AccessPermission:Busy, "IM", desc="Issued GetX";
+ SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
+ OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
+ IS, AccessPermission:Busy, "IS", desc="Issued GetS";
+ SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
+ OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
+ MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
+ II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ Load, desc="Load request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store, desc="Store request from the processor";
+ L1_Replacement, desc="Replacement";
+
+ // Requests
+ Own_GETX, desc="We observe our own GetX forwarded back to us";
+ Fwd_GETX, desc="A GetX from another processor";
+ Fwd_GETS, desc="A GetS from another processor";
+ Fwd_DMA, desc="A forwarded DMA request";
+ Inv, desc="Invalidations from the directory";
+
+ // Responses
+ Ack, desc="Received an ack message";
+ Data, desc="Received a data message, responder has a shared copy";
+ Exclusive_Data, desc="Received an exclusive data message";
+
+ Writeback_Ack, desc="Writeback O.K. from directory";
+ Writeback_Ack_Data, desc="Writeback O.K. from directory, with data";
+ Writeback_Nack, desc="Writeback not O.K. from directory";
+
+ // Triggers
+ All_acks, desc="Received all required data and message acks";
+
+ // Timeouts
+ Use_Timeout, desc="lockout period ended";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
+ }
+
+ structure(TBETable, external ="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+ TimerTable useTimerTable;
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ if(is_valid(L1Dcache_entry)) {
+ return L1Dcache_entry;
+ }
+
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ return L1Icache_entry;
+ }
+
+ Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ }
+
+ Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ // L1 hit latency
+ Cycles mandatoryQueueLatency(RubyRequestType type) {
+ if (type == RubyRequestType:IFETCH) {
+ return L1Icache.getTagLatency();
+ } else {
+ return L1Dcache.getTagLatency();
+ }
+ }
+
+ // Latency for responses that fetch data from cache
+ Cycles cacheResponseLatency() {
+ if (L1Dcache.getTagLatency() > response_latency) {
+ return L1Dcache.getTagLatency();
+ } else {
+ return response_latency;
+ }
+ }
+
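+ // Invariant: a block is resident in at most one of the two L1 arrays
+ // (I-cache or D-cache); setState asserts this on every state change.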
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
+
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
+ ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
+ ((cache_entry.CacheState != State:S) && (state == State:S)) ||
+ ((cache_entry.CacheState != State:O) && (state == State:O)) ) {
+
+ cache_entry.CacheState := state;
+ sequencer.checkCoherence(addr);
+ }
+ else {
+ cache_entry.CacheState := state;
+ }
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ testAndRead(addr, cache_entry.DataBlk, pkt);
+ } else {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ error("Data block missing!");
+ }
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, cache_entry.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ TBE tbe := TBEs[addr];
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
+ return Event:Store;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ // ** OUT_PORTS **
+
+ out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ // ** IN_PORTS **
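+ //
+ // In-ports are serviced in rank order, highest first: the use timer (4)
+ // and trigger queue (3) drain completed work, responses (2) and
+ // forwarded requests (1) come next, and new processor requests on the
+ // mandatory queue (0) are accepted only when nothing else is pending.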
+
+ // Use Timer
+ in_port(useTimerTable_in, Addr, useTimerTable, rank=4) {
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
+ TBEs.lookup(readyAddress));
+ }
+ }
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_acks, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache, rank=2) {
+ if (responseToL1Cache_in.isReady(clockEdge())) {
+ peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Exclusive_Data, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+ trigger(Event:Writeback_Ack, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:WB_ACK_DATA) {
+ trigger(Event:Writeback_Ack_Data, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:WB_NACK) {
+ trigger(Event:Writeback_Nack, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // Request Network
+ in_port(requestNetwork_in, RequestMsg, requestToL1Cache, rank=1) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg, block_on="addr") {
+ assert(in_msg.Destination.isElement(machineID));
+ DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
+
+ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
+ trigger(Event:Own_GETX, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ trigger(Event:Fwd_GETX, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fwd_GETS, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:Fwd_DMA, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue between the Node's CPU and its L1 caches
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, rank=0) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+ // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The tag matches for the L1, so the L1 asks the L2 for it.
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry,
+ TBEs[in_msg.LineAddress]);
+ } else {
+
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ // Check to see if it is in the OTHER L1
+ if (is_valid(L1Dcache_entry)) {
+ // The block is in the wrong L1, so evict it from that cache
+ trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
+ TBEs[in_msg.LineAddress]);
+ }
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
+ // L1 doesn't have the line, but there is space for it, so see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry,
+ TBEs[in_msg.LineAddress]);
+ } else {
+ // No room in the L1, so we need to evict a block to make room
+ // Check if the line we want to evict is not locked
+ Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+ trigger(Event:L1_Replacement,
+ addr,
+ getL1ICacheEntry(addr),
+ TBEs[addr]);
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The tag matches for the L1, so the L1 asks the L2 for it
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry,
+ TBEs[in_msg.LineAddress]);
+ } else {
+
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ // Check to see if it is in the OTHER L1
+ if (is_valid(L1Icache_entry)) {
+ // The block is in the wrong L1, so evict it from that cache
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
+ }
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
+ // L1 doesn't have the line, but there is space for it, so see if the L2 has it
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry,
+ TBEs[in_msg.LineAddress]);
+ } else {
+ // No room in the L1, so we need to evict a block to make room
+ // Check if the line we want to evict is not locked
+ Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, addr);
+ trigger(Event:L1_Replacement,
+ addr,
+ getL1DCacheEntry(addr),
+ TBEs[addr]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ // ACTIONS
+
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ out_msg.Prefetch := in_msg.Prefetch;
+ }
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ enqueue(requestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ out_msg.Prefetch := in_msg.Prefetch;
+ }
+ }
+ }
+
+ action(d_issuePUTX, "d", desc="Issue PUTX") {
+ enqueue(requestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(dd_issuePUTO, "\d", desc="Issue PUTO") {
+ enqueue(requestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTO;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
+ enqueue(requestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTS;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(e_sendData, "e", desc="Send data from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ if (in_msg.RequestorMachine == MachineType:L2Cache) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Dirty := false;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DPRINTF(RubySlicc, "Sending data to L2: %#x\n", in_msg.addr);
+ }
+ else {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Dirty := false;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ }
+ DPRINTF(RubySlicc, "Sending data to L1\n");
+ }
+ }
+ }
+
+ action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ if (in_msg.RequestorMachine == MachineType:L2Cache) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
+ }
+ else {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ }
+ DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
+ }
+ }
+ }
+
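+ // Ack counting uses a sum-to-zero convention: each invalidation ack
+ // carries Acks := -1, while a data response relays the positive count
+ // of acks the requestor must collect. m_decrementNumberOfMessages
+ // subtracts the incoming Acks field, so the TBE's NumPendingMsgs reaches
+ // zero exactly when the data and every expected ack have arrived.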
+ action(f_sendAck, "f", desc="Send ack from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ if (in_msg.RequestorMachine == MachineType:L1Cache) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Acks := 0 - 1; // -1
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ else {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.Acks := 0 - 1; // -1
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ }
+
+ action(g_sendUnblock, "g", desc="Send unblock to memory") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+ action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+ action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Dcache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk);
+ }
+
+ action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk);
+ }
+
+ action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.readCallback(address, cache_entry.DataBlk, true);
+ }
+
+ action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Dcache.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk);
+ cache_entry.Dirty := true;
+ }
+
+ action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.writeCallback(address, cache_entry.DataBlk, true);
+ cache_entry.Dirty := true;
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ assert(is_valid(cache_entry));
+ tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
+ tbe.Dirty := cache_entry.Dirty;
+ }
+
+ action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
+ useTimerTable.unset(address);
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
+ peek(responseToL1Cache_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
+ }
+ }
+
+ action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
+ }
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseToL1Cache_in.dequeue(clockEdge());
+ }
+
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
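+ // The use timer holds a just-filled exclusive block in M_W/MM_W for
+ // use_timeout_latency cycles so the local access can complete before
+ // the block can be taken away; forwarded requests arriving during the
+ // window are recycled (see the M_W/MM_W transitions below).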
+ action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
+ useTimerTable.set(address,
+ clockEdge() + cyclesToTicks(use_timeout_latency));
+ }
+
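+ // DMA reads are forwarded to the owning L1 as Fwd_DMA; after supplying
+ // the data, the L1 sends this DMA_ACK so the L2 can unblock the line.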
+ action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.Dirty := false;
+ out_msg.Acks := 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+ action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ if (in_msg.RequestorMachine == MachineType:L1Cache ||
+ in_msg.RequestorMachine == MachineType:DMA) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ // out_msg.Dirty := tbe.Dirty;
+ out_msg.Dirty := false;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ }
+ }
+ else {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.DataBlk := tbe.DataBlk;
+ // out_msg.Dirty := tbe.Dirty;
+ out_msg.Dirty := false;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+ }
+
+ action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ if (in_msg.RequestorMachine == MachineType:L1Cache) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ }
+ }
+ else {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+ }
+
+ // L2 will usually request data for a writeback
+ action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
+ enqueue(requestNetwork_out, RequestMsg, request_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(mapAddressToMachine(address,
+ MachineType:L2Cache));
+ if (tbe.Dirty) {
+ out_msg.Type := CoherenceRequestType:WRITEBACK_DIRTY_DATA;
+ } else {
+ out_msg.Type := CoherenceRequestType:WRITEBACK_CLEAN_DATA;
+ }
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseToL1Cache_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+
+ if (in_msg.Type == CoherenceResponseType:DATA) {
+ //assert(in_msg.Dirty == false);
+ }
+ }
+ }
+
+ action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ if (L1Dcache.isTagPresent(address)) {
+ L1Dcache.deallocate(address);
+ } else {
+ L1Icache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if ((is_invalid(cache_entry))) {
+ set_cache_entry(L1Dcache.allocate(address, new Entry));
+ }
+ }
+
+ action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if ((is_invalid(cache_entry))) {
+ set_cache_entry(L1Icache.allocate(address, new Entry));
+ }
+ }
+
+ action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1Icache.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1Dcache.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1Dcache.demand_hits;
+ }
+
+ action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
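+ // Rather than stalling, the blocked request is recycled: it moves to
+ // the back of its queue and is retried recycle_latency cycles later.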
+ transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({M_W, MM_W}, L1_Replacement) {
+ zz_recycleMandatoryQueue;
+ }
+
+ transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
+ z_recycleRequestQueue;
+ }
+
+ transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
+ zz_recycleMandatoryQueue;
+ }
+
+ // Transitions from Idle
+ transition(I, Load, IS) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch, IS) {
+ jj_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileInstMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Store, IM) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, L1_Replacement) {
+ kk_deallocateL1CacheBlock;
+ }
+
+ transition(I, Inv) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ // Transitions from Shared
+ transition(S, Store, SM) {
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L1_Replacement, SI) {
+ i_allocateTBE;
+ dd_issuePUTS;
+ forward_eviction_to_cpu;
+ kk_deallocateL1CacheBlock;
+ }
+
+ transition(S, Inv, I) {
+ f_sendAck;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(S, Fwd_GETS) {
+ e_sendData;
+ l_popForwardQueue;
+ }
+
+ transition(S, Fwd_DMA) {
+ e_sendData;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ // Transitions from Owned
+ transition(O, Store, OM) {
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, L1_Replacement, OI) {
+ i_allocateTBE;
+ dd_issuePUTO;
+ forward_eviction_to_cpu;
+ kk_deallocateL1CacheBlock;
+ }
+
+ transition(O, Fwd_GETX, I) {
+ ee_sendDataExclusive;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(O, Fwd_GETS) {
+ e_sendData;
+ l_popForwardQueue;
+ }
+
+ transition(O, Fwd_DMA) {
+ e_sendData;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ // Transitions from MM
+ transition({MM, MM_W}, Store) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, L1_Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUTX;
+ forward_eviction_to_cpu;
+ kk_deallocateL1CacheBlock;
+ }
+
+ transition(MM, Fwd_GETX, I) {
+ ee_sendDataExclusive;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(MM, Fwd_GETS, I) {
+ ee_sendDataExclusive;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(MM, Fwd_DMA, MM) {
+ e_sendData;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ // Transitions from M
+ transition(M, Store, MM) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Store, MM_W) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, L1_Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUTX;
+ forward_eviction_to_cpu;
+ kk_deallocateL1CacheBlock;
+ }
+
+ transition(M, Fwd_GETX, I) {
+ // e_sendData;
+ ee_sendDataExclusive;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(M, Fwd_GETS, O) {
+ e_sendData;
+ l_popForwardQueue;
+ }
+
+ transition(M, Fwd_DMA) {
+ e_sendData;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ // Transitions from IM
+
+ transition(IM, Inv) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(IM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IM, {Exclusive_Data, Data}, OM) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ // Transitions from SM
+ transition(SM, Inv, IM) {
+ f_sendAck;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(SM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SM, {Data, Exclusive_Data}, OM) {
+ // v_writeDataToCacheVerify;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Fwd_GETS) {
+ e_sendData;
+ l_popForwardQueue;
+ }
+
+ transition(SM, Fwd_DMA) {
+ e_sendData;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ // Transitions from OM
+ transition(OM, Own_GETX) {
+ mm_decrementNumberOfMessages;
+ o_checkForCompletion;
+ l_popForwardQueue;
+ }
+
+
+ // transition(OM, Fwd_GETX, OMF) {
+ transition(OM, Fwd_GETX, IM) {
+ ee_sendDataExclusive;
+ l_popForwardQueue;
+ }
+
+ transition(OM, Fwd_GETS) {
+ e_sendData;
+ l_popForwardQueue;
+ }
+
+ transition(OM, Fwd_DMA) {
+ e_sendData;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ //transition({OM, OMF}, Ack) {
+ transition(OM, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(OM, All_acks, MM_W) {
+ xx_store_hit;
+ gg_sendUnblockExclusive;
+ s_deallocateTBE;
+ o_scheduleUseTimeout;
+ j_popTriggerQueue;
+ }
+
+ transition(MM_W, Use_Timeout, MM) {
+ jj_unsetUseTimer;
+ }
+
+ // Transitions from IS
+
+ transition(IS, Inv) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(IS, Data, S) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ hx_load_hit;
+ g_sendUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Exclusive_Data, M_W) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ hx_load_hit;
+ gg_sendUnblockExclusive;
+ o_scheduleUseTimeout;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition(M_W, Use_Timeout, M) {
+ jj_unsetUseTimer;
+ }
+
+ // Transitions from OI/MI
+
+ transition(MI, Fwd_GETS, OI) {
+ q_sendDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition(MI, Fwd_DMA) {
+ q_sendDataFromTBEToCache;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ transition(MI, Fwd_GETX, II) {
+ q_sendExclusiveDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition({SI, OI}, Fwd_GETS) {
+ q_sendDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition({SI, OI}, Fwd_DMA) {
+ q_sendDataFromTBEToCache;
+ ub_dmaUnblockL2Cache;
+ l_popForwardQueue;
+ }
+
+ transition(OI, Fwd_GETX, II) {
+ q_sendExclusiveDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition({SI, OI, MI}, Writeback_Ack_Data, I) {
+ qq_sendWBDataFromTBEToL2; // always send data
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition({SI, OI, MI}, Writeback_Ack, I) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition({MI, OI}, Writeback_Nack, OI) {
+ // FIXME: This might cause deadlock by re-using the writeback
+ // channel, we should handle this case differently.
+ dd_issuePUTO;
+ n_popResponseQueue;
+ }
+
+ // Transitions from II
+ transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ // transition({II, SI}, Writeback_Nack, I) {
+ transition(II, Writeback_Nack, I) {
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition(SI, Writeback_Nack) {
+ dd_issuePUTS;
+ n_popResponseQueue;
+ }
+
+ transition(II, Inv) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(SI, Inv, II) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L2Cache, "L2 cache protocol")
+: CacheMemory * L2cache;
+ Cycles response_latency := 1;
+ Cycles request_latency := 1;
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="0",
+ vnet_type="request"; // this L2 bank -> a local L1
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="1",
+ vnet_type="request"; // this L2 bank -> mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="2",
+ vnet_type="response"; // this L2 bank -> a local L1 || mod-directory
+
+ // FROM the network to this local bank of L2 cache
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="0",
+ vnet_type="request"; // a local L1 -> this L2 bank
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="1",
+ vnet_type="request"; // mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="2",
+ vnet_type="response"; // a local L1 || mod-directory -> this L2 bank
+
+ MessageBuffer * triggerQueue;
+{
+ // STATES
+ state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
+
+ // Stable states
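+ // ("IL*" states: this bank holds no data for the block, but valid
+ // copies exist in the local L1s and are tracked by the local directory)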
+ NP, AccessPermission:Invalid, desc="Not Present";
+ I, AccessPermission:Invalid, desc="Invalid";
+ ILS, AccessPermission:Invalid, desc="Idle/NP, but local sharers exist";
+ ILX, AccessPermission:Invalid, desc="Idle/NP, but local exclusive exists";
+ ILO, AccessPermission:Invalid, desc="Idle/NP, but local owner exists";
+ ILOX, AccessPermission:Invalid, desc="Idle/NP, but local owner exists and chip is exclusive";
+ ILOS, AccessPermission:Invalid, desc="Idle/NP, but local owner exists and local sharers as well";
+ ILOSX, AccessPermission:Invalid, desc="Idle/NP, but local owner exists, local sharers exist, chip is exclusive ";
+ S, AccessPermission:Read_Only, desc="Shared, no local sharers";
+ O, AccessPermission:Read_Only, desc="Owned, no local sharers";
+ OLS, AccessPermission:Read_Only, desc="Owned with local sharers";
+ OLSX, AccessPermission:Read_Only, desc="Owned with local sharers, chip is exclusive";
+ SLS, AccessPermission:Read_Only, desc="Shared with local sharers";
+ M, AccessPermission:Read_Write, desc="Modified";
+
+ // Transient States
+
+ IFGX, AccessPermission:Busy, desc="Blocked, forwarded global GETX to local owner/exclusive. No other on-chip invs needed";
+ IFGS, AccessPermission:Busy, desc="Blocked, forwarded global GETS to local owner";
+ ISFGS, AccessPermission:Busy, desc="Blocked, forwarded global GETS to local owner, local sharers exist";
+ IFGXX, AccessPermission:Busy, desc="Blocked, forwarded global GETX to local owner but may need acks from other sharers";
+ OLSF, AccessPermission:Busy, desc="Blocked, got Fwd_GETX with local sharers, waiting for local inv acks";
+
+ // writebacks
+ ILOW, AccessPermission:Busy, desc="local WB request, was ILO";
+ ILOXW, AccessPermission:Busy, desc="local WB request, was ILOX";
+ ILOSW, AccessPermission:Busy, desc="local WB request, was ILOS";
+ ILOSXW, AccessPermission:Busy, desc="local WB request, was ILOSX";
+ SLSW, AccessPermission:Busy, desc="local WB request, was SLS";
+ OLSW, AccessPermission:Busy, desc="local WB request, was OLS";
+ ILSW, AccessPermission:Busy, desc="local WB request, was ILS";
+ IW, AccessPermission:Busy, desc="local WB request from only sharer, was ILS";
+ OW, AccessPermission:Busy, desc="local WB request from only sharer, was OLS";
+ SW, AccessPermission:Busy, desc="local WB request from only sharer, was SLS";
+ OXW, AccessPermission:Busy, desc="local WB request from only sharer, was OLSX";
+ OLSXW, AccessPermission:Busy, desc="local WB request from sharer, was OLSX";
+ ILXW, AccessPermission:Busy, desc="local WB request, was ILX";
+
+ IFLS, AccessPermission:Busy, desc="Blocked, forwarded local GETS to _some_ local sharer";
+ IFLO, AccessPermission:Busy, desc="Blocked, forwarded local GETS to local owner";
+ IFLOX, AccessPermission:Busy, desc="Blocked, forwarded local GETS to local owner but chip is exclusive";
+ IFLOXX, AccessPermission:Busy, desc="Blocked, forwarded local GETX to local owner/exclusive, chip is exclusive";
+ IFLOSX, AccessPermission:Busy, desc="Blocked, forwarded local GETS to local owner w/ other sharers, chip is exclusive";
+ IFLXO, AccessPermission:Busy, desc="Blocked, forwarded local GETX to local owner with other sharers, chip is exclusive";
+
+ IGS, AccessPermission:Busy, desc="Semi-blocked, issued local GETS to directory";
+ IGM, AccessPermission:Busy, desc="Blocked, issued local GETX to directory. Need global acks and data";
+ IGMLS, AccessPermission:Busy, desc="Blocked, issued local GETX to directory but may need to INV local sharers";
+ IGMO, AccessPermission:Busy, desc="Blocked, have data for local GETX but need all acks";
+ IGMIO, AccessPermission:Busy, desc="Blocked, issued local GETX, local owner with possible local sharer, may need to INV";
+ OGMIO, AccessPermission:Busy, desc="Blocked, issued local GETX, was owner, may need to INV";
+ IGMIOF, AccessPermission:Busy, desc="Blocked, issued local GETX, local owner, waiting for global acks, got Fwd_GETX";
+ IGMIOFS, AccessPermission:Busy, desc="Blocked, issued local GETX, local owner, waiting for global acks, got Fwd_GETS";
+ OGMIOF, AccessPermission:Busy, desc="Blocked, issued local GETX, was owner, waiting for global acks, got Fwd_GETX";
+
+ II, AccessPermission:Busy, desc="Blocked, handling invalidations";
+ MM, AccessPermission:Busy, desc="Blocked, was M satisfying local GETX";
+ SS, AccessPermission:Busy, desc="Blocked, was S satisfying local GETS";
+ OO, AccessPermission:Busy, desc="Blocked, was O satisfying local GETS";
+ OLSS, AccessPermission:Busy, desc="Blocked, satisfying local GETS";
+ OLSXS, AccessPermission:Busy, desc="Blocked, satisfying local GETS";
+ SLSS, AccessPermission:Busy, desc="Blocked, satisfying local GETS";
+
+ OI, AccessPermission:Busy, desc="Blocked, doing writeback, was O";
+ MI, AccessPermission:Busy, desc="Blocked, doing writeback, was M";
+ MII, AccessPermission:Busy, desc="Blocked, doing writeback, was M, got Fwd_GETX";
+ OLSI, AccessPermission:Busy, desc="Blocked, doing writeback, was OLS";
+ ILSI, AccessPermission:Busy, desc="Blocked, doing writeback, was OLS got Fwd_GETX";
+
+ // DMA blocking states
+ ILOSD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
+ ILOSXD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
+ ILOD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
+ ILXD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
+ ILOXD, AccessPermission:Busy, desc="Blocked, waiting for DMA ack";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+
+ // Requests
+ L1_GETS, desc="local L1 GETS request";
+ L1_GETX, desc="local L1 GETX request";
+ L1_PUTO, desc="local owner wants to writeback";
+ L1_PUTX, desc="local exclusive wants to writeback";
+ L1_PUTS_only, desc="only local sharer wants to writeback";
+ L1_PUTS, desc="local sharer wants to writeback";
+ Fwd_GETX, desc="A GetX from another processor";
+ Fwd_GETS, desc="A GetS from another processor";
+ Fwd_DMA, desc="A request from DMA";
+ Own_GETX, desc="A GetX from this node";
+ Inv, desc="Invalidations from the directory";
+
+ // Responses
+ IntAck, desc="Received an ack message from a local L1";
+ ExtAck, desc="Received an ack message from a remote L2";
+ All_Acks, desc="Received all ack messages";
+ Data, desc="Received a data message, responder has a shared copy";
+ Data_Exclusive, desc="Received a data message";
+ L1_WBCLEANDATA, desc="Writeback from L1, with clean data";
+ L1_WBDIRTYDATA, desc="Writeback from L1, with dirty data";
+
+ Writeback_Ack, desc="Writeback O.K. from directory";
+ Writeback_Nack, desc="Writeback not O.K. from directory";
+
+ Unblock, desc="Local L1 is telling L2 dir to unblock";
+ Exclusive_Unblock, desc="Local L1 is telling L2 dir to unblock";
+
+ DmaAck, desc="DMA ack from local L1";
+ // events initiated by this L2
+ L2_Replacement, desc="L2 Replacement", format="!r";
+
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ NetDest Sharers, desc="Set of the internal processors that want the block in shared state";
+ MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response";
+ bool OwnerValid, default="false", desc="true if Owner means something";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+
+ structure(DirEntry, desc="...", interface="AbstractEntry") {
+ NetDest Sharers, desc="Set of the internal processors that want the block in shared state";
+ MachineID Owner, desc="ID of the L1 cache to forward the block to once we get a response";
+ bool OwnerValid, default="false", desc="true if Owner means something";
+ State DirState, desc="directory state";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ Addr PC, desc="Program counter of request";
+ DataBlock DataBlk, desc="Buffer for the data block";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+
+ int NumExtPendingAcks, default="0", desc="Number of global (external) acks/data messages we are waiting for";
+ int NumIntPendingAcks, default="0", desc="Number of local (internal L1) acks/data messages we are waiting for";
+ int Fwd_GETX_ExtAcks, default="0", desc="Number of acks that requestor will need";
+ int Local_GETX_IntAcks, default="0", desc="Number of acks that requestor will need";
+
+ NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ NetDest Fwd_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
+ MachineID Fwd_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ structure(PerfectCacheMemory, external = "yes") {
+ void allocate(Addr);
+ void deallocate(Addr);
+ DirEntry lookup(Addr);
+ bool isTagPresent(Addr);
+ }
+
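+ // Sharer/owner bookkeeping for the local L1s lives in the cache entry
+ // while the block is resident in this bank, and in the
+ // PerfectCacheMemory-backed localDirectory once the block exists only
+ // in the L1s; the helpers below consult exactly one of the two (note
+ // the asserts that a block is never tracked in both).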
+ TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
+ PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+ void wakeUpAllBuffers(Addr a);
+
+ // Latency for responses that fetch data from cache
+ Cycles cacheResponseLatency() {
+ if (L2cache.getTagLatency() > response_latency) {
+ return L2cache.getTagLatency();
+ }
+ else {
+ return response_latency;
+ }
+ }
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ return static_cast(Entry, "pointer", L2cache[address]);
+ }
+
+ bool isDirTagPresent(Addr addr) {
+ return (localDirectory.isTagPresent(addr) );
+ }
+
+ DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
+ return localDirectory.lookup(address);
+ }
+
+ bool isOnlySharer(Entry cache_entry, Addr addr, MachineID shar_id) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ if (cache_entry.Sharers.count() > 1) {
+ return false;
+ }
+ else if (cache_entry.Sharers.count() == 1) {
+ if (cache_entry.Sharers.isElement(shar_id)) {
+ return true;
+ }
+ else {
+ return false; // something happened which should cause this PUTS to be nacked
+ }
+ }
+ else {
+ return false;
+ }
+ }
+ else if (localDirectory.isTagPresent(addr)){
+ DirEntry dir_entry := getDirEntry(addr);
+ if (dir_entry.Sharers.count() > 1) {
+ return false;
+ }
+ else if (dir_entry.Sharers.count() == 1) {
+ if (dir_entry.Sharers.isElement(shar_id)) {
+ return true;
+ }
+ else {
+ return false; // something happened which should cause this PUTS to be nacked
+ }
+ }
+ else {
+ return false;
+ }
+ }
+ else {
+ // shouldn't happen unless L1 issues PUTS before unblock received
+ return false;
+ }
+ }
+
+ void copyCacheStateToDir(Entry cache_entry, Addr addr) {
+ assert(localDirectory.isTagPresent(addr) == false);
+ assert(is_valid(cache_entry));
+ localDirectory.allocate(addr);
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.DirState := cache_entry.CacheState;
+ dir_entry.Sharers := cache_entry.Sharers;
+ dir_entry.Owner := cache_entry.Owner;
+ dir_entry.OwnerValid := cache_entry.OwnerValid;
+
+ }
+
+ void copyDirToCache(Entry cache_entry, Addr addr) {
+ assert(is_valid(cache_entry));
+ DirEntry dir_entry := getDirEntry(addr);
+ cache_entry.Sharers := dir_entry.Sharers;
+ cache_entry.Owner := dir_entry.Owner;
+ cache_entry.OwnerValid := dir_entry.OwnerValid;
+ }
+
+
+ void recordLocalSharerInDir(Entry cache_entry, Addr addr, MachineID shar_id) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ cache_entry.Sharers.add(shar_id);
+ }
+ else {
+ if (localDirectory.isTagPresent(addr) == false) {
+ localDirectory.allocate(addr);
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.clear();
+ dir_entry.OwnerValid := false;
+ }
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.add(shar_id);
+ }
+ }
+
+ void recordNewLocalExclusiveInDir(Entry cache_entry, Addr addr, MachineID exc_id) {
+
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ cache_entry.Sharers.clear();
+ cache_entry.OwnerValid := true;
+ cache_entry.Owner := exc_id;
+ }
+ else {
+ if (localDirectory.isTagPresent(addr) == false) {
+ localDirectory.allocate(addr);
+ }
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.clear();
+ dir_entry.OwnerValid := true;
+ dir_entry.Owner := exc_id;
+ }
+ }
+
+ void removeAllLocalSharersFromDir(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ cache_entry.Sharers.clear();
+ cache_entry.OwnerValid := false;
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.clear();
+ dir_entry.OwnerValid := false;
+ }
+ }
+
+ void removeSharerFromDir(Entry cache_entry, Addr addr, MachineID sender) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ cache_entry.Sharers.remove(sender);
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.remove(sender);
+ }
+ }
+
+ void removeOwnerFromDir(Entry cache_entry, Addr addr, MachineID sender) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ cache_entry.OwnerValid := false;
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.OwnerValid := false;
+ }
+ }
+
+ bool isLocalSharer(Entry cache_entry, Addr addr, MachineID shar_id) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return cache_entry.Sharers.isElement(shar_id);
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.Sharers.isElement(shar_id);
+ }
+ }
+
+ NetDest getLocalSharers(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return cache_entry.Sharers;
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.Sharers;
+ }
+ }
+
+ MachineID getLocalOwner(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return cache_entry.Owner;
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.Owner;
+ }
+ }
+
+ int countLocalSharers(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return cache_entry.Sharers.count();
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.Sharers.count();
+ }
+ }
+
+ bool isLocalOwnerValid(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ return cache_entry.OwnerValid;
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.OwnerValid;
+ }
+ }
+
+ int countLocalSharersExceptRequestor(Entry cache_entry, Addr addr, MachineID requestor) {
+ if (is_valid(cache_entry)) {
+ assert (localDirectory.isTagPresent(addr) == false);
+ if (cache_entry.Sharers.isElement(requestor)) {
+ return ( cache_entry.Sharers.count() - 1 );
+ }
+ else {
+ return cache_entry.Sharers.count();
+ }
+ }
+ else {
+ DirEntry dir_entry := getDirEntry(addr);
+ if (dir_entry.Sharers.isElement(requestor)) {
+ return ( dir_entry.Sharers.count() - 1 );
+ }
+ else {
+ return dir_entry.Sharers.count();
+ }
+ }
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ } else if (isDirTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.DirState;
+ } else {
+ return State:NP;
+ }
+ }
+
+ std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
+ return CoherenceRequestType_to_string(type);
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ assert((localDirectory.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
+
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (
+ (state == State:M) ||
+ (state == State:O) ||
+ (state == State:S) ||
+ (state == State:OLS) ||
+ (state == State:SLS) ||
+ (state == State:OLSX)
+ ) {
+ assert(is_valid(cache_entry));
+ }
+ else if (
+ (state == State:ILS) ||
+ (state == State:ILX) ||
+ (state == State:ILO) ||
+ (state == State:ILOX) ||
+ (state == State:ILOS) ||
+ (state == State:ILOSX)
+ ) {
+ // assert(isCacheTagPresent(addr) == false);
+ }
+
+    if (is_valid(cache_entry)) {
+      cache_entry.CacheState := state;
+      // Coherence Checker disabled for now
+      // sequencer.checkCoherence(addr);
+    }
+ else if (localDirectory.isTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.DirState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
+ return L2Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(cache_entry.CacheState));
+ return L2Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L2Cache_State_to_permission(state));
+ }
+ }
+
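+  // Functional accesses assume the block is held somewhere: in a live
+  // TBE if one exists, otherwise in a valid cache entry.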
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
+ out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
+
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+
+ // ** IN_PORTS **
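+  // (Port rank sets polling priority: higher-ranked ports are checked
+  // first, so triggers and responses drain ahead of new requests.)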
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_Acks, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseNetwork_in, ResponseMsg, responseToL2Cache, rank=2) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ if (in_msg.SenderMachine == MachineType:L2Cache) {
+ trigger(Event:ExtAck, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ }
+ else {
+ trigger(Event:IntAck, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data_Exclusive, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ DPRINTF(RubySlicc, "Received Unblock from L1 addr: %x\n", in_msg.addr);
+ trigger(Event:Unblock, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+ trigger(Event:Exclusive_Unblock, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
+ trigger(Event:Writeback_Ack, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:WB_NACK) {
+ trigger(Event:Writeback_Nack, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
+ trigger(Event:DmaAck, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // Request Network
+ in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache, rank=1) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ if (in_msg.Requestor == machineID) {
+ trigger(Event:Own_GETX, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ trigger(Event:Fwd_GETX, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:Fwd_GETS, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:Fwd_DMA, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Inv, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache, rank=0) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:L1_GETX, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:L1_GETS, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+ trigger(Event:L1_PUTO, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ trigger(Event:L1_PUTX, in_msg.addr,
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:PUTS) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
+ trigger(Event:L1_PUTS_only, in_msg.addr,
+ cache_entry, TBEs[in_msg.addr]);
+ }
+ else {
+ trigger(Event:L1_PUTS, in_msg.addr,
+ cache_entry, TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (is_invalid(cache_entry) &&
+ L2cache.cacheAvail(in_msg.addr) == false) {
+ trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
+ getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ }
+ else {
+ trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
+ cache_entry, TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_CLEAN_DATA) {
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (is_invalid(cache_entry) &&
+ L2cache.cacheAvail(in_msg.addr) == false) {
+ trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
+ getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ }
+ else {
+ trigger(Event:L1_WBCLEANDATA, in_msg.addr,
+ cache_entry, TBEs[in_msg.addr]);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // ACTIONS
+
+ action(a_issueGETS, "a", desc="issue local request globally") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ action(a_issueGETX, "\a", desc="issue local request globally") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ }
+ }
+ }
+
+ action(b_issuePUTX, "b", desc="Issue PUTX") {
+ enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTX;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(b_issuePUTO, "\b", desc="Issue PUTO") {
+ enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTO;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ /* PUTO, but local sharers exist */
+ action(b_issuePUTO_ls, "\bb", desc="Issue PUTO") {
+ enqueue(globalRequestNetwork_out, RequestMsg, request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTO_SHARERS;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(c_sendDataFromTBEToL1GETS, "c", desc="Send data from TBE to L1 requestors in TBE") {
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(tbe.L1_GetS_IDs);
+ out_msg.DataBlk := tbe.DataBlk;
+ // out_msg.Dirty := tbe.Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, tbe.DataBlk);
+ }
+
+  action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to the L1 GETX requestor recorded in the TBE") {
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(tbe.L1_GetX_ID);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Acks := tbe.Local_GETX_IntAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, tbe.DataBlk);
+ }
+
+  action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send exclusive data from TBE to L1 requestors in TBE") {
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(tbe.L1_GetS_IDs);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+ action(c_sendDataFromTBEToFwdGETX, "cc", desc="Send data from TBE to external GETX") {
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(tbe.Fwd_GetX_ID);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+
+  action(cd_sendDataFromTBEToFwdDma, "cd", desc="Send data from TBE to DMA requestor") {
+ assert(is_valid(tbe));
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ // out_msg.Dirty := tbe.Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, tbe.DataBlk);
+ }
+
+  action(c_sendDataFromTBEToFwdGETS, "ccc", desc="Send data from TBE to external GETS") {
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(tbe.Fwd_GetS_IDs);
+ out_msg.DataBlk := tbe.DataBlk;
+ // out_msg.Dirty := tbe.Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, tbe.DataBlk);
+ }
+
+  action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send exclusive data from TBE to external GETS") {
+ assert(is_valid(tbe));
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(tbe.Fwd_GetS_IDs);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Acks := tbe.Fwd_GETX_ExtAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, tbe.DataBlk);
+ }
+
+ action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") {
+ assert(is_valid(cache_entry));
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+ }
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ }
+
+  action(d_sendDataToL1GETX, "\d", desc="Send data and the ack count to the L1 requestor") {
+ assert(is_valid(cache_entry));
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+ out_msg.Acks := tbe.Local_GETX_IntAcks;
+ }
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ }
+
+  action(dd_sendDataToFwdGETX, "dd", desc="Send data to forwarded GETX") {
+ assert(is_valid(cache_entry));
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.Acks := in_msg.Acks;
+ }
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ }
+
+
+  action(dd_sendDataToFwdGETS, "\dd", desc="Send data to forwarded GETS") {
+ assert(is_valid(cache_entry));
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ // shared data should be clean
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ }
+
+  action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="Send exclusive data to forwarded GETS") {
+ assert(is_valid(cache_entry));
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+  action(e_sendAck, "e", desc="Send an ack to the forwarded GETX requestor") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
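+      // a single negative ack: the receiver sums incoming Acks into
+      // its pending count, and 0 - 1 is the conventional SLICC
+      // spelling of -1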
+      out_msg.Destination.add(tbe.Fwd_GetX_ID);
+      out_msg.Acks := 0 - 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+  action(e_sendAckToL1Requestor, "\e", desc="Send an ack to the L1 requestor") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Acks := 0 - 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+
+  action(e_sendAckToL1RequestorFromTBE, "eee", desc="Send an ack to the L1 GETX requestor recorded in the TBE") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(tbe.L1_GetX_ID);
+ out_msg.Acks := 0 - 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(ee_sendLocalInv, "\ee", desc="Send local invalidates") {
+ assert(is_valid(tbe));
+ tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
+ DPRINTF(RubySlicc, "Address: %#x, Local Sharers: %s, Pending Acks: %d\n",
+ address, getLocalSharers(cache_entry, address),
+ tbe.NumIntPendingAcks);
+ if (isLocalOwnerValid(cache_entry, address)) {
+ tbe.NumIntPendingAcks := tbe.NumIntPendingAcks + 1;
+ DPRINTF(RubySlicc, "%s\n", getLocalOwner(cache_entry, address));
+ }
+
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
+ if (isLocalOwnerValid(cache_entry, address))
+ {
+ out_msg.Destination.add(getLocalOwner(cache_entry, address));
+ }
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+
+ action(ee_sendLocalInvSharersOnly, "\eee", desc="Send local invalidates to sharers if they exist") {
+ // assert(countLocalSharers(address) > 0);
+ assert(is_valid(tbe));
+ tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
+
+ if (countLocalSharers(cache_entry, address) > 0) {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+ }
+
+ action(ee_addLocalIntAck, "e\ee", desc="add a local ack to wait for") {
+ assert(is_valid(tbe));
+ tbe.NumIntPendingAcks := tbe.NumIntPendingAcks + 1;
+ }
+
+ action(ee_issueLocalInvExceptL1Requestor, "\eeee", desc="Send local invalidates to sharers if they exist") {
+ peek(L1requestNetwork_in, RequestMsg) {
+      // assert(countLocalSharers(address) > 0);
+ if (countLocalSharers(cache_entry, address) == 0) {
+ tbe.NumIntPendingAcks := 0;
+ }
+ else {
+ if (isLocalSharer(cache_entry, address, in_msg.Requestor)) {
+ tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address) - 1;
+ }
+ else {
+ tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
+ }
+
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
+ out_msg.Destination.remove(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+ }
+ }
+
+ action(ee_issueLocalInvExceptL1RequestorInTBE, "\eeeeee", desc="Send local invalidates to sharers if they exist") {
+ assert(is_valid(tbe));
+ if (countLocalSharers(cache_entry, address) == 0) {
+ tbe.NumIntPendingAcks := 0;
+ }
+ else {
+ if (isLocalSharer(cache_entry, address, tbe.L1_GetX_ID)) {
+ tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address) - 1;
+ }
+ else {
+ tbe.NumIntPendingAcks := countLocalSharers(cache_entry, address);
+ }
+ }
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := tbe.L1_GetX_ID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.addNetDest(getLocalSharers(cache_entry, address));
+ out_msg.Destination.remove(tbe.L1_GetX_ID);
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+
+
+ action(f_sendUnblock, "f", desc="Send unblock to global directory") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+
+ action(f_sendExclusiveUnblock, "\f", desc="Send unblock to global directory") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+
+ action(g_recordLocalSharer, "g", desc="Record new local sharer from unblock message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ recordLocalSharerInDir(cache_entry, in_msg.addr, in_msg.Sender);
+ }
+ }
+
+ action(g_recordLocalExclusive, "\g", desc="Record new local exclusive sharer from unblock message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ recordNewLocalExclusiveInDir(cache_entry, address, in_msg.Sender);
+ }
+ }
+
+ action(gg_clearLocalSharers, "gg", desc="Clear local sharers") {
+ removeAllLocalSharersFromDir(cache_entry, address);
+ }
+
+ action(gg_clearSharerFromL1Response, "\gg", desc="Clear sharer from L1 response queue") {
+ peek(responseNetwork_in, ResponseMsg) {
+ removeSharerFromDir(cache_entry, in_msg.addr, in_msg.Sender);
+ }
+ }
+
+ action(gg_clearSharerFromL1Request, "clsl1r", desc="Clear sharer from L1 request queue") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ removeSharerFromDir(cache_entry, in_msg.addr, in_msg.Requestor);
+ }
+ }
+
+ action(gg_clearOwnerFromL1Request, "clol1r", desc="Clear owner from L1 request queue") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ removeOwnerFromDir(cache_entry, in_msg.addr, in_msg.Requestor);
+ }
+ }
+
+ action(h_countLocalSharersExceptRequestor, "h", desc="counts number of acks needed for L1 GETX") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.Local_GETX_IntAcks := countLocalSharersExceptRequestor(cache_entry, address, in_msg.Requestor);
+ }
+ }
+
+ action(h_clearIntAcks, "\h", desc="clear IntAcks") {
+ assert(is_valid(tbe));
+ tbe.Local_GETX_IntAcks := 0;
+ }
+
+ action(hh_countLocalSharersExceptL1GETXRequestorInTBE, "hh", desc="counts number of acks needed for L1 GETX") {
+ assert(is_valid(tbe));
+ tbe.Local_GETX_IntAcks := countLocalSharersExceptRequestor(cache_entry, address, tbe.L1_GetX_ID);
+ }
+
+ action(i_copyDataToTBE, "\i", desc="Copy data from response queue to TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ APPEND_TRANSITION_COMMENT(in_msg.Sender);
+ }
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ if(is_valid(cache_entry)) {
+ tbe.DataBlk := cache_entry.DataBlk;
+ tbe.Dirty := cache_entry.Dirty;
+ }
+ tbe.NumIntPendingAcks := 0; // default value
+ tbe.NumExtPendingAcks := 0; // default value
+ tbe.Fwd_GetS_IDs.clear();
+ tbe.L1_GetS_IDs.clear();
+ }
+
+ action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ out_msg.Acks := 0 - 1;
+ }
+ }
+ }
+
+ action(jd_forwardDmaRequestToLocalOwner, "jd", desc="Forward dma request to local owner") {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := in_msg.RequestorMachine;
+ out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ out_msg.Acks := 0 - 1;
+ }
+ }
+ }
+
+
+ action(k_forwardLocalGETSToLocalSharer, "k", desc="Forward local request to local sharer/owner") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ // should randomize this so one node doesn't get abused more than others
+ DirEntry dir_entry := getDirEntry(in_msg.addr);
+ out_msg.Destination.add(dir_entry.Sharers.smallestElement(MachineType:L1Cache));
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
+ action(k_forwardLocalGETXToLocalOwner, "\k", desc="Forward local request to local owner") {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := tbe.L1_GetX_ID;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ DirEntry dir_entry := getDirEntry(address);
+ out_msg.Destination.add(dir_entry.Owner);
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ out_msg.Acks := 1 + tbe.Local_GETX_IntAcks;
+ }
+ }
+
+  // same as previous, except that it assumes no TBE is present to get the number of acks
+ action(kk_forwardLocalGETXToLocalExclusive, "kk", desc="Forward local request to local owner") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ out_msg.Acks := 1;
+ }
+ }
+ }
+
+ action(kk_forwardLocalGETSToLocalOwner, "\kk", desc="Forward local request to local owner") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue( localRequestNetwork_out, RequestMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := MachineType:L1Cache;
+ out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
+
+ action(l_writebackAckNeedData, "l", desc="Send writeback ack to L1 requesting data") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue( responseNetwork_out, ResponseMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := CoherenceResponseType:WB_ACK_DATA;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(l_writebackAckDropData, "\l", desc="Send writeback ack to L1 indicating to drop data") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue( responseNetwork_out, ResponseMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := CoherenceResponseType:WB_ACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(ll_writebackNack, "\ll", desc="Send writeback nack to L1") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue( responseNetwork_out, ResponseMsg, response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := CoherenceResponseType:WB_NACK;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(m_popRequestQueue, "m", desc="Pop request queue.") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.NumIntPendingAcks := tbe.NumIntPendingAcks + in_msg.Acks;
+ }
+ }
+
+ action(m_decrementNumberOfMessagesExt, "\mmm", desc="Decrement the number of messages for which we're waiting") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.NumExtPendingAcks := tbe.NumExtPendingAcks - in_msg.Acks;
+ }
+ }
+
+ action(mm_decrementNumberOfMessagesExt, "\mm", desc="Decrement the number of messages for which we're waiting") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.NumExtPendingAcks := tbe.NumExtPendingAcks - in_msg.Acks;
+ }
+ }
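+
+  // Ack bookkeeping: internal acks arrive carrying Acks = -1 (written
+  // 0 - 1) and are summed into NumIntPendingAcks, while the external
+  // counter is decremented by the incoming count; the o_checkFor*
+  // completion actions below fire the ALL_ACKS trigger once the
+  // relevant counter reaches zero.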
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
+ L1requestNetwork_in.dequeue(clockEdge());
+ }
+
+
+ action(o_checkForIntCompletion, "\o", desc="Check if we have received all the messages required for completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumIntPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
+ action(o_checkForExtCompletion, "\oo", desc="Check if we have received all the messages required for completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumExtPendingAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
+
+ action( qq_sendDataFromTBEToMemory, "qq", desc="Send data from TBE to directory") {
+ enqueue(globalRequestNetwork_out, RequestMsg, response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:L2Cache;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ if (tbe.Dirty) {
+ out_msg.Type := CoherenceRequestType:WRITEBACK_DIRTY_DATA;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.Type := CoherenceRequestType:WRITEBACK_CLEAN_ACK;
+ // NOTE: in a real system this would not send data. We send
+ // data here only so we can check it at the memory
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action( r_setMRU, "\rrr", desc="manually set the MRU bit for cache line" ) {
+ if(is_valid(cache_entry)) {
+ L2cache.setMRU(address);
+ }
+ }
+
+ action( s_recordGetXL1ID, "ss", desc="record local GETX requestor") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.L1_GetX_ID := in_msg.Requestor;
+ }
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action( s_recordGetSL1ID, "\ss", desc="record local GETS requestor") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.L1_GetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+ action(t_recordFwdXID, "t", desc="record global GETX requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.Fwd_GetX_ID := in_msg.Requestor;
+ tbe.Fwd_GETX_ExtAcks := in_msg.Acks;
+ }
+ }
+
+ action(t_recordFwdSID, "\t", desc="record global GETS requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.Fwd_GetS_IDs.clear();
+ tbe.Fwd_GetS_IDs.add(in_msg.Requestor);
+ }
+ }
+
+
+ action(u_writeCleanDataToCache, "wCd", desc="Write clean data to cache") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ assert(cache_entry.Dirty == false);
+ }
+ }
+
+ action(u_writeDirtyDataToCache, "wDd", desc="Write dirty data to cache") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ cache_entry.Dirty := true;
+ }
+ }
+
+ action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cache.deallocate(address);
+ unset_cache_entry();
+ }
+
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cache.demand_hits;
+ }
+
+ action(y_copyCacheStateToDir, "y", desc="Copy cache state to directory state") {
+ copyCacheStateToDir(cache_entry, address);
+ }
+
+ action(y_copyDirToCacheAndRemove, "/y", desc="Copy dir state to cache and remove") {
+ copyDirToCache(cache_entry, address);
+ localDirectory.deallocate(address);
+ }
+
+  action(zz_recycleGlobalRequestQueue, "\zglb", desc="Send the head of the global request queue to the back of the queue.") {
+ peek(requestNetwork_in, RequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+  action(zz_recycleL1RequestQueue, "\zl1", desc="Send the head of the L1 request queue to the back of the queue.") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
+ L1requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(st_stallAndWaitL1RequestQueue, "st", desc="Stall and wait on the address") {
+ stall_and_wait(L1requestNetwork_in, address);
+ }
+
+ action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
+ wakeUpAllBuffers(address);
+ }
+
+ action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:L2Cache;
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
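+  // Informal key to the transient-state names (inferred from the
+  // transitions below): the leading letters give the L2 copy's own
+  // MOESI state, 'L' marks live local L1 copies, trailing S/O/X mark
+  // local sharers/owner/exclusive, and W/D suffixes mark states
+  // waiting on a writeback Unblock or a DmaAck.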
+
+ transition({II, IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, IGMIO, OGMIO, IGMIOF, OGMIOF, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {L1_PUTO, L1_PUTS, L1_PUTS_only, L1_PUTX}) {
+ st_stallAndWaitL1RequestQueue;
+ }
+
+ transition({II, IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, IGMIO, OGMIO, IGMIOF, OGMIOF, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {L1_GETX, L1_GETS}) {
+ st_stallAndWaitL1RequestQueue;
+ }
+
+  transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, ILXW, OW, SW, OXW, OLSXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, IGMLS, IGMO, MM, SS, OO, OI, MI, MII, OLSI, ILSI, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, L2_Replacement) {
+ zz_recycleL1RequestQueue;
+ }
+
+  transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, IGS, IGM, MM, SS, OO, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {Fwd_GETX, Fwd_GETS, Fwd_DMA}) {
+ zz_recycleGlobalRequestQueue;
+ }
+
+ transition({OGMIO, IGMIO, IGMO}, Fwd_DMA) {
+ zz_recycleGlobalRequestQueue;
+ }
+
+  transition({IFGX, IFGS, ISFGS, IFGXX, IFLXO, ILOW, ILOXW, ILOSW, ILOSXW, SLSW, OLSW, ILSW, IW, OW, SW, OXW, OLSXW, ILXW, IFLS, IFLO, IFLOX, IFLOXX, IFLOSX, OLSXS, MM, SS, OO, SLSS, OLSS, OLSF, IGMIOFS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {Inv}) {
+ zz_recycleGlobalRequestQueue;
+ }
+
+ transition({IGM, IGS, ILOSD, ILOSXD, ILOD, ILXD, ILOXD}, {Own_GETX}) {
+ zz_recycleGlobalRequestQueue;
+ }
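+
+  // The blanket transitions above keep transient states simple: new L1
+  // requests and writebacks are parked with stall_and_wait (replayed
+  // later by wa_wakeUpDependents), while forwarded global traffic is
+  // recycled to the tail of its queue and retried.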
+
+  // must have happened because we forwarded a GETX to a local exclusive owner trying to write back
+ transition({I, M, O, ILS, ILOX, OLS, OLSX, SLS, S}, L1_PUTX) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition({M}, {L1_PUTS, L1_PUTO} ) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition({ILS, OLSX}, L1_PUTO){
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+  // happens if we forwarded a GETS to an exclusive owner that tried to
+  // write back
+  // ?? should we just nack these instead? Could be a bug here
+ transition(ILO, L1_PUTX, ILOW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+  // this can happen if we forwarded an L1_GETX to the exclusive owner after it issued a PUTX
+ transition(ILOS, L1_PUTX, ILOSW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOSX, L1_PUTX, ILOSXW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+  // must have happened because we got an Inv while the L1 attempted a PUTS
+ transition(I, L1_PUTS) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition(I, L1_PUTO) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ // FORWARDED REQUESTS
+
+ transition({ILO, ILX, ILOX}, Fwd_GETS, IFGS) {
+ i_allocateTBE;
+ t_recordFwdSID;
+ j_forwardGlobalRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition({ILOS, ILOSX}, Fwd_GETS, ISFGS) {
+ i_allocateTBE;
+ t_recordFwdSID;
+ j_forwardGlobalRequestToLocalOwner;
+ m_popRequestQueue;
+ }
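+
+  // Fwd_DMA handling in the IL* owner states: allocate a TBE, forward
+  // the DMA request to the local owner, and drop back to the original
+  // state once the DmaAck unblock returns.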
+
+ transition(ILOS, Fwd_DMA, ILOSD) {
+ i_allocateTBE;
+ jd_forwardDmaRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(ILOSD, DmaAck, ILOS) {
+ s_deallocateTBE;
+ da_sendDmaAckUnblock;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSX, Fwd_DMA, ILOSXD) {
+ i_allocateTBE;
+ t_recordFwdSID;
+ jd_forwardDmaRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(ILOSXD, DmaAck, ILOSX) {
+ s_deallocateTBE;
+ da_sendDmaAckUnblock;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILO, Fwd_DMA, ILOD) {
+ i_allocateTBE;
+ t_recordFwdSID;
+ jd_forwardDmaRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(ILOD, DmaAck, ILO) {
+ s_deallocateTBE;
+ da_sendDmaAckUnblock;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILX, Fwd_DMA, ILXD) {
+ i_allocateTBE;
+ t_recordFwdSID;
+ jd_forwardDmaRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(ILXD, DmaAck, ILX) {
+ s_deallocateTBE;
+ da_sendDmaAckUnblock;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOX, Fwd_DMA, ILOXD) {
+ i_allocateTBE;
+ t_recordFwdSID;
+ jd_forwardDmaRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(ILOXD, DmaAck, ILOX) {
+ s_deallocateTBE;
+ da_sendDmaAckUnblock;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition({ILOS, ILOSX, ILO, ILX, ILOX, ILXW}, Data) {
+ i_copyDataToTBE;
+ c_sendDataFromTBEToFwdGETS;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ }
+
+ transition(IFGS, Data, ILO) {
+ i_copyDataToTBE;
+ c_sendDataFromTBEToFwdGETS;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ISFGS, Data, ILOS) {
+ i_copyDataToTBE;
+ c_sendDataFromTBEToFwdGETS;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IFGS, Data_Exclusive, I) {
+ i_copyDataToTBE;
+ c_sendExclusiveDataFromTBEToFwdGETS;
+ gg_clearLocalSharers;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ transition({ILX, ILO, ILOX}, Fwd_GETX, IFGX) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ j_forwardGlobalRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(IFGX, {Data_Exclusive, Data}, I) {
+ i_copyDataToTBE;
+ c_sendDataFromTBEToFwdGETX;
+ gg_clearLocalSharers;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition({ILOSX, ILOS}, Fwd_GETX, IFGXX) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ j_forwardGlobalRequestToLocalOwner;
+ ee_sendLocalInvSharersOnly;
+ ee_addLocalIntAck;
+ m_popRequestQueue;
+ }
+
+
+ transition(IFGXX, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IFGXX, Data_Exclusive) {
+ i_copyDataToTBE;
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IFGXX, All_Acks, I) {
+ c_sendDataFromTBEToFwdGETX;
+ gg_clearLocalSharers;
+ s_deallocateTBE;
+ n_popTriggerQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ // transition({O, OX}, Fwd_GETX, I) {
+ transition(O, Fwd_GETX, I) {
+ dd_sendDataToFwdGETX;
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+ transition({O, OLS}, Fwd_GETS) {
+ dd_sendDataToFwdGETS;
+ m_popRequestQueue;
+ }
+
+ transition({O, OLS}, Fwd_DMA) {
+ dd_sendDataToFwdGETS;
+ da_sendDmaAckUnblock;
+ m_popRequestQueue;
+ }
+
+ // transition({OLSX, OX}, Fwd_GETS, O) {
+ transition(OLSX, Fwd_GETS, OLS) {
+ dd_sendDataToFwdGETS;
+ m_popRequestQueue;
+ }
+
+ transition(OLSX, Fwd_DMA) {
+ dd_sendDataToFwdGETS;
+ da_sendDmaAckUnblock;
+ m_popRequestQueue;
+ }
+
+ transition(M, Fwd_GETX, I) {
+ dd_sendDataToFwdGETX;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+ // MAKE THIS THE SAME POLICY FOR NOW
+
+ // transition(M, Fwd_GETS, O) {
+ // dd_sendDataToFwdGETS;
+ // m_popRequestQueue;
+ // }
+
+ transition(M, Fwd_GETS, I) {
+ dd_sendExclusiveDataToFwdGETS;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+ transition(M, Fwd_DMA) {
+ dd_sendExclusiveDataToFwdGETS;
+ da_sendDmaAckUnblock;
+ m_popRequestQueue;
+ }
+
+ transition({OLS, OLSX}, Fwd_GETX, OLSF) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ ee_sendLocalInv;
+ m_popRequestQueue;
+ }
+
+ transition(OLSF, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(OLSF, All_Acks, I) {
+ c_sendDataFromTBEToFwdGETX;
+ gg_clearLocalSharers;
+ s_deallocateTBE;
+ rr_deallocateL2CacheBlock;
+ n_popTriggerQueue;
+ wa_wakeUpDependents;
+ }
+
+
+
+ // INVALIDATIONS FROM GLOBAL DIRECTORY
+
+ transition({IGM, IGS}, Inv) {
+ t_recordFwdXID;
+ e_sendAck;
+ m_popRequestQueue;
+ }
+
+ transition({I,NP}, Inv) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ e_sendAck;
+ s_deallocateTBE;
+ m_popRequestQueue;
+ }
+
+ // NEED INV for S state
+
+ transition({ILS, ILO, ILX}, Inv, II) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ ee_sendLocalInv;
+ gg_clearLocalSharers;
+ m_popRequestQueue;
+ }
+
+ transition(SLS, Inv, II) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ ee_sendLocalInv;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+ transition(II, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(II, All_Acks, I) {
+ e_sendAck;
+ s_deallocateTBE;
+ n_popTriggerQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(S, Inv, I) {
+ i_allocateTBE;
+ t_recordFwdXID;
+ e_sendAck;
+ s_deallocateTBE;
+ rr_deallocateL2CacheBlock;
+ m_popRequestQueue;
+ }
+
+
+ // LOCAL REQUESTS SATISFIED LOCALLY
+
+ transition(OLSX, L1_GETX, IFLOX) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ // count number of INVs needed that doesn't include requestor
+ h_countLocalSharersExceptRequestor;
+ // issue INVs to everyone except requestor
+ ee_issueLocalInvExceptL1Requestor;
+      d_sendDataToL1GETX;
+ y_copyCacheStateToDir;
+ r_setMRU;
+ rr_deallocateL2CacheBlock;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(IFLOX, Exclusive_Unblock, ILX) {
+ g_recordLocalExclusive;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OLSX, L1_GETS, OLSXS) {
+ d_sendDataToL1GETS;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLSXS, Unblock, OLSX) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ // after this, can't get Fwd_GETX
+ transition(IGMO, Own_GETX) {
+ mm_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ m_popRequestQueue;
+  }
+
+
+ transition(ILX, L1_GETS, IFLOXX) {
+ kk_forwardLocalGETSToLocalOwner;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOSX, L1_GETS, IFLOSX) {
+ kk_forwardLocalGETSToLocalOwner;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition({ILOS, ILO}, L1_GETS, IFLO) {
+ kk_forwardLocalGETSToLocalOwner;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILS, L1_GETS, IFLS) {
+ k_forwardLocalGETSToLocalSharer;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition({ILX, ILOX}, L1_GETX, IFLOXX) {
+ kk_forwardLocalGETXToLocalExclusive;
+ e_sendAckToL1Requestor;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOX, L1_GETS, IFLOX) {
+ kk_forwardLocalGETSToLocalOwner;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(IFLOX, Unblock, ILOSX) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IFLS, Unblock, ILS) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IFLOXX, Unblock, ILOSX) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IFLOSX, Unblock, ILOSX) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition({IFLOSX, IFLOXX}, Exclusive_Unblock, ILX) {
+ g_recordLocalExclusive;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IFLO, Unblock, ILOS) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ transition(ILOSX, L1_GETX, IFLXO) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ h_countLocalSharersExceptRequestor;
+ ee_issueLocalInvExceptL1Requestor;
+ k_forwardLocalGETXToLocalOwner;
+ e_sendAckToL1RequestorFromTBE;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(IFLXO, Exclusive_Unblock, ILX) {
+ g_recordLocalExclusive;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ // LOCAL REQUESTS THAT MUST ISSUE
+
+ transition(NP, {L1_PUTS, L1_PUTX, L1_PUTO}) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition({NP, I}, L1_GETS, IGS) {
+ i_allocateTBE;
+ s_recordGetSL1ID;
+ a_issueGETS;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition({NP, I}, L1_GETX, IGM) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(S, L1_GETX, IGM) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ y_copyCacheStateToDir;
+ r_setMRU;
+ rr_deallocateL2CacheBlock;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILS, L1_GETX, IGMLS) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ // count number of INVs (just sharers?) needed that doesn't include requestor
+ h_countLocalSharersExceptRequestor;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(IGMLS, Inv) {
+ t_recordFwdXID;
+ ee_sendLocalInv;
+ m_popRequestQueue;
+ }
+
+ transition(IGMLS, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IGMLS, All_Acks, IGM) {
+ gg_clearLocalSharers;
+ h_clearIntAcks;
+ e_sendAck;
+ n_popTriggerQueue;
+ }
+
+ // transition(IGMLS, ExtAck, IGMO) {
+ transition(IGMLS, ExtAck) {
+ m_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IGMLS, {Data, Data_Exclusive}, IGMO) {
+ ee_issueLocalInvExceptL1RequestorInTBE;
+ i_copyDataToTBE;
+ m_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ n_popResponseQueue;
+ }
+
+
+ transition(ILOS, L1_GETX, IGMIO) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ // new exclusive happened while sharer attempted writeback
+ transition(ILX, {L1_PUTS, L1_PUTS_only, L1_PUTO}) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition(S, L1_PUTS) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLS, L1_GETX, OGMIO) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ h_countLocalSharersExceptRequestor;
+ // COPY DATA FROM CACHE TO TBE (happens during i_allocateTBE)
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(OGMIO, Fwd_GETS) {
+ t_recordFwdSID;
+ c_sendDataFromTBEToFwdGETS;
+ m_popRequestQueue;
+ }
+
+ transition(ILO, L1_GETX, IGMIO) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+    // the following, of course, returns 0 sharers, but do it anyway for consistency
+ h_countLocalSharersExceptRequestor;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition({ILO, ILOX}, L1_PUTS) {
+ ll_writebackNack;
+ o_popL1RequestQueue;
+ }
+
+ transition(IGMIO, Fwd_GETX, IGMIOF) {
+ t_recordFwdXID;
+ j_forwardGlobalRequestToLocalOwner;
+ ee_sendLocalInvSharersOnly;
+ ee_addLocalIntAck;
+ m_popRequestQueue;
+ }
+
+ transition(IGMIO, Fwd_GETS, IGMIOFS) {
+ t_recordFwdSID;
+ j_forwardGlobalRequestToLocalOwner;
+ m_popRequestQueue;
+ }
+
+ transition(IGMIOFS, Data, IGMIO) {
+ i_copyDataToTBE;
+ c_sendDataFromTBEToFwdGETS;
+ n_popResponseQueue;
+ }
+
+ transition(OGMIO, Fwd_GETX, OGMIOF) {
+ t_recordFwdXID;
+ ee_sendLocalInvSharersOnly;
+ m_popRequestQueue;
+ }
+
+ transition(OGMIOF, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(OGMIOF, All_Acks, IGM) {
+ gg_clearLocalSharers;
+ hh_countLocalSharersExceptL1GETXRequestorInTBE;
+ c_sendDataFromTBEToFwdGETX;
+ n_popTriggerQueue;
+ }
+
+ transition(IGMIOF, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IGMIOF, Data_Exclusive) {
+ i_copyDataToTBE;
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IGMIOF, All_Acks, IGM) {
+ gg_clearLocalSharers;
+ c_sendDataFromTBEToFwdGETX;
+ n_popTriggerQueue;
+ }
+
+ transition(IGMIO, All_Acks, IGMO) {
+ hh_countLocalSharersExceptL1GETXRequestorInTBE;
+ ee_issueLocalInvExceptL1RequestorInTBE;
+ k_forwardLocalGETXToLocalOwner;
+ e_sendAckToL1RequestorFromTBE;
+ n_popTriggerQueue;
+ }
+
+ transition(OGMIO, All_Acks, IGMO) {
+ ee_issueLocalInvExceptL1RequestorInTBE;
+ c_sendDataFromTBEToL1GETX;
+ n_popTriggerQueue;
+ }
+
+ transition({IGMIO, OGMIO}, Own_GETX) {
+ mm_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ m_popRequestQueue;
+  }
+
+ transition(IGM, {Data, Data_Exclusive}, IGMO) {
+ i_copyDataToTBE;
+ m_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ n_popResponseQueue;
+ }
+
+ transition({IGM, IGMIO, OGMIO}, ExtAck) {
+ m_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IGMO, ExtAck) {
+ m_decrementNumberOfMessagesExt;
+ o_checkForExtCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IGS, Data) {
+ i_copyDataToTBE;
+ m_decrementNumberOfMessagesExt;
+ c_sendDataFromTBEToL1GETS;
+ n_popResponseQueue;
+ }
+
+ transition(IGS, Data_Exclusive) {
+ i_copyDataToTBE;
+ m_decrementNumberOfMessagesExt;
+ c_sendExclusiveDataFromTBEToL1GETS;
+ n_popResponseQueue;
+ }
+
+ transition(IGS, Unblock, ILS) {
+ g_recordLocalSharer;
+ f_sendUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IGS, Exclusive_Unblock, ILX) {
+ g_recordLocalExclusive;
+ f_sendExclusiveUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IGMO, All_Acks) {
+ c_sendDataFromTBEToL1GETX;
+ n_popTriggerQueue;
+ }
+
+ transition(IGMO, Exclusive_Unblock, ILX) {
+ g_recordLocalExclusive;
+ f_sendExclusiveUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ transition(SLS, L1_GETX, IGMLS) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ // count number of INVs needed that doesn't include requestor
+ h_countLocalSharersExceptRequestor;
+ // issue INVs to everyone except requestor
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+  }
+
+ transition(SLS, L1_GETS, SLSS ) {
+ d_sendDataToL1GETS;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(SLSS, Unblock, SLS) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ transition(O, L1_GETX, IGMO) {
+ i_allocateTBE;
+ s_recordGetXL1ID;
+ a_issueGETX;
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLS, L1_GETS, OLSS) {
+ d_sendDataToL1GETS;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLSS, Unblock, OLS) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IGMO, Fwd_GETX, IGM) {
+ t_recordFwdXID;
+ c_sendDataFromTBEToFwdGETX;
+ m_popRequestQueue;
+  }
+
+ transition(IGMO, Fwd_GETS) {
+ t_recordFwdSID;
+ c_sendDataFromTBEToFwdGETS;
+ m_popRequestQueue;
+ }
+
+
+ // LOCAL REQUESTS SATISFIED DIRECTLY BY L2
+
+ transition(M, L1_GETX, MM) {
+ i_allocateTBE;
+ // should count 0 of course
+ h_countLocalSharersExceptRequestor;
+ d_sendDataToL1GETX;
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ s_deallocateTBE;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(MM, Exclusive_Unblock, ILX) {
+ g_recordLocalExclusive;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(M, L1_GETS, OO) {
+ i_allocateTBE;
+ // should count 0 of course
+ h_countLocalSharersExceptRequestor;
+ d_sendDataToL1GETX;
+ r_setMRU;
+ s_deallocateTBE;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(S, L1_GETS, SS) {
+ d_sendDataToL1GETS;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(SS, Unblock, SLS) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(O, L1_GETS, OO) {
+ d_sendDataToL1GETS;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(OO, Unblock, OLS) {
+ g_recordLocalSharer;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OO, Exclusive_Unblock, ILX) {
+    g_recordLocalExclusive;
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ // L1 WRITEBACKS
+ transition(ILO, L1_PUTO, ILOW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOX, L1_PUTO, ILOXW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+
+ transition(ILOS, L1_PUTO, ILOSW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOSX, L1_PUTO, ILOSXW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+
+  // Keep the data or drop it? Just drop it for now.
+ transition(ILOS, L1_PUTS_only, ILOW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILSW, Unblock, ILS) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOW, Unblock, ILO) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSX, L1_PUTS_only, ILOXW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOXW, Unblock, ILOX) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+  // Keep the data or drop it? Just drop it for now.
+ transition(ILOS, L1_PUTS, ILOSW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOSX, L1_PUTS, ILOSXW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILOSW, Unblock, ILOS) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSXW, Unblock, ILOSX) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(SLS, L1_PUTS, SLSW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(SLS, L1_PUTS_only, SW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(SW, {Unblock}, S) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OLS, L1_PUTS, OLSW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILS, L1_PUTS, ILSW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILS, L1_PUTS_only, IW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLS, L1_PUTS_only, OW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLSX, L1_PUTS_only, OXW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLSX, L1_PUTS, OLSXW) {
+ l_writebackAckDropData;
+ o_popL1RequestQueue;
+ }
+
+ transition(OLSXW, {Unblock}, OLSX) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OW, {Unblock}, O) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OXW, {Unblock}, M) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+  transition(ILX, L1_PUTX, ILXW) {
+ l_writebackAckNeedData;
+ o_popL1RequestQueue;
+ }
+
+ transition(ILXW, L1_WBDIRTYDATA, M) {
+ gg_clearLocalSharers;
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ u_writeDirtyDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ // clean writeback
+ transition(ILXW, L1_WBCLEANDATA, M) {
+ gg_clearLocalSharers;
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ u_writeCleanDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILXW, Unblock, ILX) {
+ // writeback canceled because L1 invalidated
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILSW, L1_WBCLEANDATA, SLS) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ u_writeCleanDataToCache;
+ gg_clearSharerFromL1Request;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(IW, L1_WBCLEANDATA, S) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ u_writeCleanDataToCache;
+ gg_clearSharerFromL1Request;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ // Owner can have dirty data
+ transition(ILOW, L1_WBDIRTYDATA, O) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeDirtyDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOW, L1_WBCLEANDATA, O) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeCleanDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOXW, L1_WBDIRTYDATA, M) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeDirtyDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOXW, L1_WBCLEANDATA, M) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeCleanDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSW, L1_WBDIRTYDATA, OLS) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeDirtyDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSW, L1_WBCLEANDATA, OLS) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeCleanDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSXW, L1_WBDIRTYDATA, OLSX) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeDirtyDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILOSXW, L1_WBCLEANDATA, OLSX) {
+ vv_allocateL2CacheBlock;
+ y_copyDirToCacheAndRemove;
+ gg_clearOwnerFromL1Request;
+ u_writeCleanDataToCache;
+ o_popL1RequestQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(SLSW, {Unblock}, SLS) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OLSW, {Unblock}, OLS) {
+ gg_clearSharerFromL1Response;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+
+ // L2 WRITEBACKS
+ transition({I, S}, L2_Replacement, I) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(ILS, L2_Replacement) {
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ }
+
+  transition(ILX, L2_Replacement) {
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ }
+
+  transition({ILO, ILOS}, L2_Replacement) {
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(SLS, L2_Replacement, ILS) {
+ y_copyCacheStateToDir;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition({OLS, OLSX}, L2_Replacement, OLSI) {
+ y_copyCacheStateToDir;
+ b_issuePUTO_ls;
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ }
+
+
+ transition(O, L2_Replacement, OI) {
+ b_issuePUTO;
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(M, L2_Replacement, MI) {
+ b_issuePUTX;
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(OLSI, Fwd_GETX, ILSI) {
+ t_recordFwdXID;
+ ee_sendLocalInv;
+ m_popRequestQueue;
+ }
+
+ transition(ILSI, IntAck) {
+ m_decrementNumberOfMessagesInt;
+ o_checkForIntCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(ILSI, All_Acks, MII) {
+ gg_clearLocalSharers;
+ c_sendDataFromTBEToFwdGETX;
+ n_popTriggerQueue;
+ }
+
+ transition(OLSI, Fwd_GETS) {
+ t_recordFwdSID;
+ c_sendDataFromTBEToFwdGETS;
+ m_popRequestQueue;
+ }
+
+ transition({MI, OI}, Fwd_GETS, OI) {
+ t_recordFwdSID;
+ c_sendDataFromTBEToFwdGETS;
+ m_popRequestQueue;
+ }
+
+ transition({MI, OI}, Fwd_DMA, OI) {
+ cd_sendDataFromTBEToFwdDma;
+ da_sendDmaAckUnblock;
+ m_popRequestQueue;
+ }
+
+ transition(OLSI, Fwd_DMA) {
+ cd_sendDataFromTBEToFwdDma;
+ da_sendDmaAckUnblock;
+ m_popRequestQueue;
+ }
+
+ transition({MI, OI}, Fwd_GETX, MII) {
+ t_recordFwdXID;
+ c_sendDataFromTBEToFwdGETX;
+ m_popRequestQueue;
+ }
+
+ transition({MI, OI}, Writeback_Ack, I) {
+ qq_sendDataFromTBEToMemory;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(MII, Writeback_Nack, I) {
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(OI, Writeback_Nack) {
+ b_issuePUTO;
+ n_popResponseQueue;
+ }
+
+ transition(OLSI, Writeback_Ack, ILS) {
+ qq_sendDataFromTBEToMemory;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(MII, Writeback_Ack, I) {
+ f_sendUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+
+ transition(ILSI, Writeback_Ack, ILS) {
+ f_sendUnblock;
+ s_deallocateTBE;
+ n_popResponseQueue;
+ wa_wakeUpDependents;
+ }
+}
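The invalidation and writeback transitions above all lean on one bookkeeping idiom: the TBE tracks how many internal or external messages are still outstanding (the m_/mm_decrementNumberOfMessages* actions), and a check action enqueues a trigger message once the count hits zero, which the trigger queue later surfaces as the All_Acks event. A minimal Python sketch of that idiom, with illustrative names rather than gem5 APIs:

    # Sketch of the SLICC ack-counting pattern; TBE and the trigger queue
    # here are plain Python stand-ins, not gem5 objects.
    class TBE:
        def __init__(self, pending_msgs):
            self.pending_msgs = pending_msgs  # set when the request is issued

    def on_ack(tbe, trigger_queue, addr):
        tbe.pending_msgs -= 1                 # m_decrementNumberOfMessages*
        if tbe.pending_msgs == 0:             # o_checkFor*Completion
            trigger_queue.append(("ALL_ACKS", addr))  # surfaces as All_Acks

    trigger_queue = []
    tbe = TBE(pending_msgs=2)
    on_ack(tbe, trigger_queue, addr=0x40)
    on_ack(tbe, trigger_queue, addr=0x40)
    assert trigger_queue == [("ALL_ACKS", 0x40)]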
--- /dev/null
+/*
+ * Copyright (c) 2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:Directory, "Directory protocol")
+: DirectoryMemory * directory;
+ Cycles directory_latency := 6;
+ Cycles to_memory_controller_latency := 1;
+
+ // Message Queues
+ MessageBuffer * requestToDir, network="From", virtual_network="1",
+ vnet_type="request"; // a mod-L2 bank -> this Dir
+ MessageBuffer * responseToDir, network="From", virtual_network="2",
+ vnet_type="response"; // a mod-L2 bank -> this Dir
+
+ MessageBuffer * forwardFromDir, network="To", virtual_network="1",
+ vnet_type="forward";
+ MessageBuffer * responseFromDir, network="To", virtual_network="2",
+ vnet_type="response"; // Dir -> mod-L2 bank
+
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_I") {
+ // Base states
+ I, AccessPermission:Read_Write, desc="Invalid";
+ S, AccessPermission:Read_Only, desc="Shared";
+ O, AccessPermission:Maybe_Stale, desc="Owner";
+ M, AccessPermission:Maybe_Stale, desc="Modified";
+
+ IS, AccessPermission:Busy, desc="Blocked, was in idle";
+ SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
+ OO, AccessPermission:Busy, desc="Blocked, was in owned";
+ MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
+ MM, AccessPermission:Busy, desc="Blocked, going to modified";
+
+ MI, AccessPermission:Busy, desc="Blocked on a writeback";
+ MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
+ OS, AccessPermission:Busy, desc="Blocked on a writeback";
+ OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
+
+ XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
+ XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
+ OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";
+
+ OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
+ MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ PUTX, desc="A PUTX arrives";
+ PUTO, desc="A PUTO arrives";
+ PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
+ Unblock, desc="An unblock message arrives";
+ Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
+    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
+ Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
+ Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
+ DMA_ACK, desc="DMA Ack";
+ Data, desc="Data to directory";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface='AbstractEntry') {
+ State DirectoryState, desc="Directory state";
+ NetDest Sharers, desc="Sharers for this block";
+ NetDest Owner, desc="Owner of this block";
+    int WaitingUnblocks, desc="Number of unblock messages we're waiting for";
+ }
+
+ structure(TBE, desc="...") {
+ Addr PhysicalAddress, desc="Physical address for this entry";
+ int Len, desc="Length of request";
+ DataBlock DataBlk, desc="DataBlk";
+ MachineID Requestor, desc="original requestor";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ // ** OBJECTS **
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ void set_tbe(TBE b);
+ void unset_tbe();
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+
+ if (is_valid(dir_entry)) {
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ State getState(TBE tbe, Addr addr) {
+ return getDirectoryEntry(addr).DirectoryState;
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (directory.isPresent(addr)) {
+
+ if (state == State:I) {
+ assert(getDirectoryEntry(addr).Owner.count() == 0);
+ assert(getDirectoryEntry(addr).Sharers.count() == 0);
+ }
+
+ if (state == State:S) {
+ assert(getDirectoryEntry(addr).Owner.count() == 0);
+ }
+
+ if (state == State:O) {
+ assert(getDirectoryEntry(addr).Owner.count() == 1);
+ assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
+ }
+
+ if (state == State:M) {
+ assert(getDirectoryEntry(addr).Owner.count() == 1);
+ assert(getDirectoryEntry(addr).Sharers.count() == 0);
+ }
+
+ if ((state != State:SS) && (state != State:OO)) {
+ assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
+ }
+
+ if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
+ getDirectoryEntry(addr).DirectoryState := state;
+ // disable coherence checker
+ // sequencer.checkCoherence(addr);
+ }
+ else {
+ getDirectoryEntry(addr).DirectoryState := state;
+ }
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ if (directory.isPresent(addr)) {
+ DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ if (directory.isPresent(addr)) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ functionalMemoryRead(pkt);
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ // if no sharers, then directory can be considered
+ // both a sharer and exclusive w.r.t. coherence checking
+ bool isBlockShared(Addr addr) {
+ if (directory.isPresent(addr)) {
+ if (getDirectoryEntry(addr).DirectoryState == State:I) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool isBlockExclusive(Addr addr) {
+ if (directory.isPresent(addr)) {
+ if (getDirectoryEntry(addr).DirectoryState == State:I) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // ** OUT_PORTS **
+ out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+
+ // ** IN_PORTS **
+
+ in_port(unblockNetwork_in, ResponseMsg, responseToDir, rank=2) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
+ trigger(Event:Last_Unblock, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else {
+ trigger(Event:Unblock, in_msg.addr,
+ TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
+ trigger(Event:Exclusive_Unblock, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
+ trigger(Event:DMA_ACK, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
+ if (requestQueue_in.isReady(clockEdge())) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:PUTX) {
+ trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO) {
+ trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
+ trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
+ trigger(Event:Dirty_Writeback, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_CLEAN_ACK) {
+ trigger(Event:Clean_Writeback, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=0) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:WB_ACK;
+ out_msg.Sender := in_msg.Requestor;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:WB_NACK;
+ out_msg.Sender := in_msg.Requestor;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(c_clearOwner, "c", desc="Clear the owner field") {
+ getDirectoryEntry(address).Owner.clear();
+ }
+
+ action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
+ getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
+ getDirectoryEntry(address).Owner.clear();
+ }
+
+ action(cc_clearSharers, "\c", desc="Clear the sharers field") {
+ getDirectoryEntry(address).Sharers.clear();
+ }
+
+ action(d_sendDataMsg, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Acks := in_msg.Acks;
+ if (in_msg.ReadX) {
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ } else {
+ out_msg.Type := CoherenceResponseType:DATA;
+ }
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+  action(p_fwdDataToDMA, "\d", desc="Forward data to the DMA requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ getDirectoryEntry(address).Owner.clear();
+ getDirectoryEntry(address).Owner.add(in_msg.Sender);
+ }
+ }
+
+ action(f_forwardRequest, "f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
+ out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
+ out_msg.Acks := getDirectoryEntry(address).Sharers.count();
+ if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
+ action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
+ out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
+ out_msg.Acks := getDirectoryEntry(address).Sharers.count();
+ if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
+ action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
+ peek(requestQueue_in, RequestMsg) {
+ if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) ||
+ ((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) &&
+ (getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) {
+ enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
+ // out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers;
+ out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers);
+ out_msg.Destination.remove(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Invalidate_Control;
+ }
+ }
+ }
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue(clockEdge());
+ }
+
+ action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ getDirectoryEntry(address).Sharers.add(in_msg.Sender);
+ }
+ }
+
+ action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
+ getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
+ }
+
+ action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
+ getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
+ assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
+ }
+
+ action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+ action(qw_queueMemoryWBFromCacheRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(requestQueue_in, RequestMsg) {
+ if (is_valid(tbe)) {
+ queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ } else {
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
+ desc="Queue off-chip writeback request") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ DataBlock DataBlk := in_msg.DataBlk;
+ DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress),
+ tbe.Len);
+ queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
+ DataBlk);
+ }
+ }
+
+ action(qw_queueMemoryWBFromDMARequest, "/qw", desc="Queue off-chip writeback request") {
+ peek(requestQueue_in, RequestMsg) {
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+ action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ if (is_valid(tbe)) {
+ out_msg.Destination.add(tbe.Requestor);
+ }
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+    peek(requestQueue_in, RequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.PhysicalAddress := in_msg.addr;
+ tbe.Len := in_msg.Len;
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Requestor := in_msg.Requestor;
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+
+ // TRANSITIONS
+ transition(I, GETX, MM) {
+ qf_queueMemoryFetchRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, DMA_WRITE, XI_U) {
+ qw_queueMemoryWBFromDMARequest;
+ a_sendDMAAck; // ack count may be zero
+ i_popIncomingRequestQueue;
+ }
+
+ transition(XI_M, Memory_Data, I) {
+ d_sendDataMsg; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_U, Exclusive_Unblock, I) {
+ cc_clearSharers;
+ c_clearOwner;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(S, GETX, MM) {
+ qf_queueMemoryFetchRequest;
+ g_sendInvalidations;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, DMA_READ) {
+ //qf_queueMemoryFetchRequest;
+ p_fwdDataToDMA;
+ //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, DMA_WRITE, XI_U) {
+ qw_queueMemoryWBFromDMARequest;
+ a_sendDMAAck; // ack count may be zero
+ g_sendInvalidations; // the DMA will collect invalidations
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, GETS, IS) {
+ qf_queueMemoryFetchRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({S, SS}, GETS, SS) {
+ qf_queueMemoryFetchRequest;
+ n_incrementOutstanding;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({I, S}, PUTO) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({I, S, O}, PUTX) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, GETX, MM) {
+ f_forwardRequest;
+ g_sendInvalidations;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, DMA_READ, OD) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ //g_sendInvalidations; // this will cause acks to be sent to the DMA
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OD, DMA_ACK, O) {
+ j_popIncomingUnblockQueue;
+ }
+
+  transition({O, M}, DMA_WRITE, OI_D) {
+ f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
+ g_sendInvalidations; // these go to the DMA Controller
+ v_allocateTBE;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OI_D, Data, XI_U) {
+ qw_queueMemoryWBRequestFromMessageAndTBE;
+ a_sendDMAAck2; // ack count may be zero
+ w_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({O, OO}, GETS, OO) {
+ f_forwardRequest;
+ n_incrementOutstanding;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, GETX, MM) {
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+  // no exclusive unblock will arrive at the directory
+ transition(M, DMA_READ, MD) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ i_popIncomingRequestQueue;
+ }
+
+ transition(MD, DMA_ACK, M) {
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(M, GETS, MO) {
+ f_forwardRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTX, MI) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+  // happens if the M->O transition occurred on-chip
+ transition(M, PUTO, MI) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(M, PUTO_SHARERS, MIS) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, PUTO, OS) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, PUTO_SHARERS, OSS) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+
+ transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
+ zz_recycleRequest;
+ }
+
+ transition({MM, MO}, Exclusive_Unblock, M) {
+ cc_clearSharers;
+ e_ownerIsUnblocker;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(MO, Unblock, O) {
+ m_addUnlockerToSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
+ zz_recycleRequest;
+ }
+
+ transition(IS, GETS) {
+ zz_recycleRequest;
+ }
+
+ transition(IS, Unblock, S) {
+ m_addUnlockerToSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(IS, Exclusive_Unblock, M) {
+ cc_clearSharers;
+ e_ownerIsUnblocker;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(SS, Unblock) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(SS, Last_Unblock, S) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OO, Unblock) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(OO, Last_Unblock, O) {
+ m_addUnlockerToSharers;
+ o_decrementOutstanding;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(MI, Dirty_Writeback, I) {
+ c_clearOwner;
+ cc_clearSharers;
+ qw_queueMemoryWBFromCacheRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(MIS, Dirty_Writeback, S) {
+ c_moveOwnerToSharer;
+ qw_queueMemoryWBFromCacheRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(MIS, Clean_Writeback, S) {
+ c_moveOwnerToSharer;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OS, Dirty_Writeback, S) {
+ c_clearOwner;
+ qw_queueMemoryWBFromCacheRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OSS, Dirty_Writeback, S) {
+ c_moveOwnerToSharer;
+ qw_queueMemoryWBFromCacheRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OSS, Clean_Writeback, S) {
+ c_moveOwnerToSharer;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(MI, Clean_Writeback, I) {
+ c_clearOwner;
+ cc_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OS, Clean_Writeback, S) {
+ c_clearOwner;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({MI, MIS}, Unblock, M) {
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({OS, OSS}, Unblock, O) {
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
+ d_sendDataMsg;
+ q_popMemQueue;
+ }
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
+ //a_sendAck;
+ q_popMemQueue;
+ }
+
+}
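Two details of the directory machine are easy to miss. First, the SS and OO states batch multiple in-flight GETS requests: n_incrementOutstanding bumps WaitingUnblocks once per forwarded request, and the unblock port distinguishes Unblock from Last_Unblock by peeking at that counter, so the state only folds back to S or O on the final unblock. A one-line Python rendering of that classification (names illustrative):

    def classify_unblock(waiting_unblocks):
        # Mirrors the unblockNetwork_in peek: the last outstanding unblock
        # is promoted to a distinct event so the transition can leave SS/OO.
        return "Last_Unblock" if waiting_unblocks == 1 else "Unblock"

    assert classify_unblock(3) == "Unblock"
    assert classify_unblock(1) == "Last_Unblock"

Second, a DMA write to a line in O or M (state OI_D) must first collect the owner's modified data; qw_queueMemoryWBRequestFromMessageAndTBE then merges the DMA's partial write into that block with copyPartial before queueing the memory write.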
--- /dev/null
+/*
+ * Copyright (c) 2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2009-2013 Mark D. Hill and David A. Wood
+ * Copyright (c) 2010-2011 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:DMA, "DMA Controller")
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 14;
+ Cycles response_latency := 14;
+
+ MessageBuffer * responseFromDir, network="From", virtual_network="2",
+ vnet_type="response";
+
+ MessageBuffer * reqToDir, network="To", virtual_network="1",
+ vnet_type="request";
+ MessageBuffer * respToDir, network="To", virtual_network="2",
+ vnet_type="dmaresponse";
+
+ MessageBuffer * mandatoryQueue;
+ MessageBuffer * triggerQueue;
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, AccessPermission:Invalid, desc="Ready to accept a new request";
+ BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
+ BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ DMA_Ack, desc="DMA write to memory completed";
+ Inv_Ack, desc="Invalidation Ack from a sharer";
+ All_Acks, desc="All acks received";
+ }
+
+ structure(TBE, desc="...") {
+ Addr address, desc="Physical address";
+ int NumAcks, default="0", desc="Number of Acks pending";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
+ State cur_state;
+
+ Tick clockEdge();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ State getState(TBE tbe, Addr addr) {
+ return cur_state;
+ }
+ void setState(TBE tbe, Addr addr, State state) {
+ cur_state := state;
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("DMA does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("DMA does not support functional write.");
+ }
+
+ out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
+ out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
+
+ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, rank=2) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
+      peek(dmaResponseQueue_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
+ trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
+ in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
+ TBEs[makeLineAddress(in_msg.addr)]);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=1) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, rank=0) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+        if (in_msg.Type == SequencerRequestType:LD) {
+ trigger(Event:ReadRequest, in_msg.LineAddress,
+ TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.LineAddress,
+ TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+ out_msg.addr := in_msg.PhysicalAddress;
+ out_msg.Type := CoherenceRequestType:DMA_READ;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:DMA;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, request_latency) {
+ out_msg.addr := in_msg.PhysicalAddress;
+ out_msg.Type := CoherenceRequestType:DMA_WRITE;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Requestor := machineID;
+ out_msg.RequestorMachine := MachineType:DMA;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
+ dma_sequencer.ackCallback(address);
+ }
+
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
+ action(u_updateAckCount, "u", desc="Update ack count") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.NumAcks := tbe.NumAcks - in_msg.Acks;
+ }
+ }
+
+  action(u_sendExclusiveUnblockToDir, "\u", desc="send exclusive unblock to directory") {
+ enqueue(respToDirectory_out, ResponseMsg, response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:DMA;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+  action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue(clockEdge());
+ }
+
+ action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
+ assert(is_valid(tbe));
+ dma_sequencer.dataCallback(tbe.DataBlk, address);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
+ stall_and_wait(dmaRequestQueue_in, address);
+ }
+
+ action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
+
+ transition(READY, ReadRequest, BUSY_RD) {
+ s_sendReadRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, Data, READY) {
+ t_updateTBEData;
+ d_dataCallbackFromTBE;
+ w_deallocateTBE;
+ //u_updateAckCount;
+ //o_checkForCompletion;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition(BUSY_RD, All_Acks, READY) {
+ d_dataCallbackFromTBE;
+ //u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ s_sendWriteRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_WR, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, DMA_Ack) {
+ u_updateAckCount; // actually increases
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, All_Acks, READY) {
+ a_ackCallback;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+  transition({BUSY_RD, BUSY_WR}, {ReadRequest, WriteRequest}) {
+ zz_stallAndWaitRequestQueue;
+ }
+}
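The DMA controller reuses the same trigger-queue idiom: every Inv_Ack and the directory's DMA_Ack fold into tbe.NumAcks through u_updateAckCount, and o_checkForCompletion fires the ALL_ACKS trigger once the running sum returns to zero. The sketch below assumes, purely for illustration, that per-sharer acks carry a count of -1 so they cancel the positive sharer count in the directory's DMA_Ack; the actual sign convention lives in the cache controller files.

    def update_ack_count(tbe, acks_in_msg):
        tbe["NumAcks"] -= acks_in_msg   # u_updateAckCount
        return tbe["NumAcks"] == 0      # o_checkForCompletion

    tbe = {"NumAcks": 0}
    update_ack_count(tbe, -1)           # one sharer's Inv_Ack (assumed -1)
    update_ack_count(tbe, -1)           # second sharer
    assert update_ack_count(tbe, 2)     # DMA_Ack carrying Sharers.count()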
--- /dev/null
+/*
+ * Copyright (c) 2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETS, desc="Get Shared";
+ PUTX, desc="Put eXclusive";
+ PUTO, desc="Put Owned";
+ PUTO_SHARERS, desc="Put Owned, but sharers exist so don't remove from sharers list";
+ PUTS, desc="Put Shared";
+ INV, desc="Invalidation";
+ WRITEBACK_CLEAN_DATA, desc="Clean writeback (contains data)";
+ WRITEBACK_CLEAN_ACK, desc="Clean writeback (contains no data)";
+ WRITEBACK_DIRTY_DATA, desc="Dirty writeback (contains data)";
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+ ACK, desc="ACKnowledgment, responder doesn't have a copy";
+ DATA, desc="Data";
+ DATA_EXCLUSIVE, desc="Data, no processor has a copy";
+ UNBLOCK, desc="Unblock";
+ UNBLOCK_EXCLUSIVE, desc="Unblock, we're in E/M";
+ WB_ACK, desc="Writeback ack";
+  WB_ACK_DATA, desc="Writeback ack with data";
+ WB_NACK, desc="Writeback neg. ack";
+ DMA_ACK, desc="Ack that a DMA write completed";
+}
+
+// TriggerType
+enumeration(TriggerType, desc="...") {
+ ALL_ACKS, desc="See corresponding event";
+}
+
+// TriggerMsg
+structure(TriggerMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ TriggerType Type, desc="Type of trigger";
+
+ bool functionalRead(Packet *pkt) {
+ // Trigger message does not hold data
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // Trigger message does not hold data
+ return false;
+ }
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ int Len, desc="Length of Request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ MachineType RequestorMachine, desc="type of component";
+ NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line (DMA WRITE request)";
+ int Acks, desc="How many acks to expect";
+ MessageSizeType MessageSize, desc="size category of the message";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+
+ bool functionalRead(Packet *pkt) {
+ // Read only those messages that contain the data
+ if (Type == CoherenceRequestType:DMA_READ ||
+ Type == CoherenceRequestType:DMA_WRITE ||
+ Type == CoherenceRequestType:WRITEBACK_CLEAN_DATA ||
+ Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check required since all messages are written
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+// ResponseMsg (and also unblock requests)
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="Node who sent the data";
+ MachineType SenderMachine, desc="type of component sending msg";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int Acks, desc="How many acks to expect";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ // Read only those messages that contain the data
+ if (Type == CoherenceResponseType:DATA ||
+ Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check required since all messages are written
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
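The functionalRead/functionalWrite pairs above encode a simple rule for gem5's functional accesses: a functional read may only be satisfied by a message type that actually carries a data block, while a functional write is applied to every in-flight message so no stale copy of the line survives. The same rule in plain Python (helper name illustrative):

    # Message types whose DataBlk field is meaningful, per the two
    # functionalRead bodies above.
    DATA_BEARING = {
        "DMA_READ", "DMA_WRITE",                         # RequestMsg
        "WRITEBACK_CLEAN_DATA", "WRITEBACK_DIRTY_DATA",  # RequestMsg
        "DATA", "DATA_EXCLUSIVE",                        # ResponseMsg
    }

    def functional_read_ok(msg_type):
        return msg_type in DATA_BEARING

    assert functional_read_ok("DATA_EXCLUSIVE")
    assert not functional_read_ok("ACK")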
--- /dev/null
+protocol "MOESI_CMP_directory";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_CMP_directory-msg.sm";
+include "MOESI_CMP_directory-L1cache.sm";
+include "MOESI_CMP_directory-L2cache.sm";
+include "MOESI_CMP_directory-dma.sm";
+include "MOESI_CMP_directory-dir.sm";
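The .slicc manifest is the entry point the SLICC compiler reads: it names the protocol and lists, in order, the .sm sources (message definitions first, then the controllers) that are compiled together into one coherence protocol. A toy parser showing what the manifest expresses (the real frontend lives in src/mem/slicc and is considerably more involved):

    def parse_slicc(text):
        # Extract the protocol name and ordered include list from a .slicc
        # manifest; illustrative only, not the actual SLICC grammar.
        protocol, includes = None, []
        for line in text.splitlines():
            line = line.strip().rstrip(";")
            if line.startswith("protocol"):
                protocol = line.split(None, 1)[1].strip('"')
            elif line.startswith("include"):
                includes.append(line.split(None, 1)[1].strip('"'))
        return protocol, includes

    manifest = 'protocol "MOESI_CMP_directory";\ninclude "MOESI_CMP_directory-msg.sm";'
    assert parse_slicc(manifest) == ("MOESI_CMP_directory",
                                     ["MOESI_CMP_directory-msg.sm"])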
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: MOESI_CMP_token-L1cache.sm 1.22 05/01/19 15:55:39-06:00 beckmann@s0-28.cs.wisc.edu $
+ *
+ */
+
+machine(MachineType:L1Cache, "Token protocol")
+ : Sequencer * sequencer;
+ CacheMemory * L1Icache;
+ CacheMemory * L1Dcache;
+ int l2_select_num_bits;
+ int N_tokens;
+
+ Cycles l1_request_latency := 2;
+ Cycles l1_response_latency := 2;
+ int retry_threshold := 1;
+ Cycles fixed_timeout_latency := 100;
+ Cycles reissue_wakeup_latency := 10;
+ Cycles use_timeout_latency := 50;
+
+ bool dynamic_timeout_enabled := "True";
+ bool no_mig_atomic := "True";
+ bool send_evictions;
+
+ // Message Queues
+ // From this node's L1 cache TO the network
+
+ // a local L1 -> this L2 bank
+ MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
+ vnet_type="response";
+ MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
+ vnet_type="persistent";
+ // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
+ MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",
+ vnet_type="request";
+
+ // To this node's L1 cache FROM the network
+
+ // a L2 bank -> this L1
+ MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
+ vnet_type="response";
+ MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
+ vnet_type="persistent";
+ // a L2 bank -> this L1
+ MessageBuffer * requestToL1Cache, network="From", virtual_network="1",
+ vnet_type="request";
+
+ MessageBuffer * mandatoryQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ NP, AccessPermission:Invalid, "NP", desc="Not Present";
+ I, AccessPermission:Invalid, "I", desc="Idle";
+ S, AccessPermission:Read_Only, "S", desc="Shared";
+ O, AccessPermission:Read_Only, "O", desc="Owned";
+ M, AccessPermission:Read_Only, "M", desc="Modified (dirty)";
+ MM, AccessPermission:Read_Write, "MM", desc="Modified (dirty and locally modified)";
+ M_W, AccessPermission:Read_Only, "M^W", desc="Modified (dirty), waiting";
+ MM_W, AccessPermission:Read_Write, "MM^W", desc="Modified (dirty and locally modified), waiting";
+
+ // Transient States
+ IM, AccessPermission:Busy, "IM", desc="Issued GetX";
+ SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
+ OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
+ IS, AccessPermission:Busy, "IS", desc="Issued GetS";
+
+ // Locked states
+ I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
+ S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
+ IM_L, AccessPermission:Busy, "IM^L", desc="Invalid, Locked, trying to go to Modified";
+ SM_L, AccessPermission:Busy, "SM^L", desc="Shared, Locked, trying to go to Modified";
+ IS_L, AccessPermission:Busy, "IS^L", desc="Invalid, Locked, trying to go to Shared";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ Load, desc="Load request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store, desc="Store request from the processor";
+ Atomic, desc="Atomic request from the processor";
+ L1_Replacement, desc="L1 Replacement";
+
+ // Responses
+ Data_Shared, desc="Received a data message, we are now a sharer";
+ Data_Owner, desc="Received a data message, we are now the owner";
+ Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
+ Ack, desc="Received an ack message";
+ Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
+
+ // Requests
+    Transient_GETX, desc="A GetX from another processor";
+    Transient_Local_GETX, desc="A GetX from another local processor";
+    Transient_GETS, desc="A GetS from another processor";
+    Transient_Local_GETS, desc="A GetS from another local processor";
+    Transient_GETS_Last_Token, desc="A GetS from another processor; we hold the last token";
+    Transient_Local_GETS_Last_Token, desc="A GetS from another local processor; we hold the last token";
+
+    // Lock/Unlock for distributed persistent requests
+ Persistent_GETX, desc="Another processor has priority to read/write";
+ Persistent_GETS, desc="Another processor has priority to read";
+ Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
+ Own_Lock_or_Unlock, desc="This processor now has priority";
+
+ // Triggers
+    Request_Timeout, desc="The reissue timer expired; retry the request";
+    Use_TimeoutStarverX, desc="Use timer expired while a persistent GETX starver waits";
+    Use_TimeoutStarverS, desc="Use timer expired while a persistent GETS starver waits";
+    Use_TimeoutNoStarvers, desc="Use timer expired with no starvers";
+    Use_TimeoutNoStarvers_NoMig, desc="Use timer expired with no starvers; don't migrate (atomic)";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int Tokens, desc="The number of tokens we're holding for the line";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ Addr addr, desc="Physical address for this TBE";
+ State TBEState, desc="Transient state";
+ int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
+ Addr PC, desc="Program counter of request";
+
+ bool WentPersistent, default="false", desc="Request went persistent";
+ bool ExternalResponse, default="false", desc="Response came from an external controller";
+ bool IsAtomic, default="false", desc="Request was an atomic request";
+
+ AccessType TypeOfAccess, desc="Type of request (used for profiling)";
+ Cycles IssueTime, desc="Time the request was issued";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ structure(PersistentTable, external="yes") {
+ void persistentRequestLock(Addr, MachineID, AccessType);
+ void persistentRequestUnlock(Addr, MachineID);
+ bool okToIssueStarving(Addr, MachineID);
+ MachineID findSmallest(Addr);
+ AccessType typeOfSmallest(Addr);
+ void markEntries(Addr);
+ bool isLocked(Addr);
+ int countStarvingForAddress(Addr);
+ int countReadStarvingForAddress(Addr);
+ }
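+
+  // The persistent table (implemented outside SLICC) arbitrates starving
+  // requests per address: findSmallest() presumably returns the highest-
+  // priority (lowest-numbered) starving machine, which is the only one
+  // allowed to collect tokens while the address is locked.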
+
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ TBETable L1_TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+
+ bool starving, default="false";
+ int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ PersistentTable persistentTable;
+ TimerTable useTimerTable;
+ TimerTable reissueTimerTable;
+
+ int outstandingRequests, default="0";
+ int outstandingPersistentRequests, default="0";
+
+  // Constant that provides hysteresis when calculating the estimated average
+ int averageLatencyHysteresis, default="(8)";
+ Cycles averageLatencyCounter,
+ default="(Cycles(500) << (*m_averageLatencyHysteresis_ptr))";
+
+ Cycles averageLatencyEstimate() {
+ DPRINTF(RubySlicc, "%d\n",
+ (averageLatencyCounter >> averageLatencyHysteresis));
+ return averageLatencyCounter >> averageLatencyHysteresis;
+ }
+
+ void updateAverageLatencyEstimate(Cycles latency) {
+ DPRINTF(RubySlicc, "%d\n", latency);
+
+ // By subtracting the current average and then adding the most
+ // recent sample, we calculate an estimate of the recent average.
+ // If we simply used a running sum and divided by the total number
+ // of entries, the estimate of the average would adapt very slowly
+ // after the execution has run for a long time.
+ // averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
+
+ averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
+ }
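+
+  // A worked example of the update above, using the defaults and a
+  // hypothetical sample: with hysteresis 8 the counter holds 2^8 = 256
+  // times the running average, starting at 500 << 8 = 128000 (estimate
+  // 500 cycles). One sample of latency 100 gives 128000 - 500 + 100 =
+  // 127600, so the estimate becomes 127600 >> 8 = 498 cycles: an
+  // exponentially weighted moving average that decays old samples by a
+  // factor of 255/256 per update.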
+
+ Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ if(is_valid(L1Dcache_entry)) {
+ return L1Dcache_entry;
+ }
+
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ return L1Icache_entry;
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ return L1Dcache_entry;
+ }
+
+ Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ return L1Icache_entry;
+ }
+
+ int getTokens(Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Tokens;
+ }
+ return 0;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ } else {
+ if (persistentTable.isLocked(addr) && (persistentTable.findSmallest(addr) != machineID)) {
+ // Not in cache, in persistent table, but this processor isn't highest priority
+ return State:I_L;
+ } else {
+ return State:NP;
+ }
+ }
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
+
+ if (is_valid(tbe)) {
+ assert(state != State:I);
+ assert(state != State:S);
+ assert(state != State:O);
+ assert(state != State:MM);
+ assert(state != State:M);
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ // Make sure the token count is in range
+ assert(cache_entry.Tokens >= 0);
+ assert(cache_entry.Tokens <= max_tokens());
+ assert(cache_entry.Tokens != (max_tokens() / 2));
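+      // Note (inferred from the comparisons against max_tokens() / 2
+      // throughout this file): ownership is encoded in the token count
+      // itself, with the owner token counting as more than half of
+      // max_tokens(). A count of exactly half is therefore unreachable,
+      // which the assert above checks.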
+
+ if ((state == State:I_L) ||
+ (state == State:IM_L) ||
+ (state == State:IS_L)) {
+ // Make sure we have no tokens in the "Invalid, locked" states
+ assert(cache_entry.Tokens == 0);
+
+ // Make sure the line is locked
+ // assert(persistentTable.isLocked(addr));
+
+ // But we shouldn't have highest priority for it
+ // assert(persistentTable.findSmallest(addr) != id);
+
+ } else if ((state == State:S_L) ||
+ (state == State:SM_L)) {
+ assert(cache_entry.Tokens >= 1);
+ assert(cache_entry.Tokens < (max_tokens() / 2));
+
+ // Make sure the line is locked...
+ // assert(persistentTable.isLocked(addr));
+
+ // ...But we shouldn't have highest priority for it...
+ // assert(persistentTable.findSmallest(addr) != id);
+
+ // ...And it must be a GETS request
+ // assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);
+
+ } else {
+
+ // If there is an entry in the persistent table of this block,
+ // this processor needs to have an entry in the table for this
+ // block, and that entry better be the smallest (highest
+ // priority). Otherwise, the state should have been one of
+ // locked states
+
+ //if (persistentTable.isLocked(addr)) {
+ // assert(persistentTable.findSmallest(addr) == id);
+ //}
+ }
+
+ // in M and E you have all the tokens
+ if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
+ assert(cache_entry.Tokens == max_tokens());
+ }
+
+ // in NP you have no tokens
+ if (state == State:NP) {
+ assert(cache_entry.Tokens == 0);
+ }
+
+ // You have at least one token in S-like states
+ if (state == State:S || state == State:SM) {
+ assert(cache_entry.Tokens > 0);
+ }
+
+      // You have at least half the tokens in O-like states
+      if (state == State:O || state == State:OM) {
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ }
+
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := L1_TBEs[addr];
+ if(is_valid(tbe)) {
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
+ }
+ }
+
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if (type == RubyRequestType:ST) {
+ return Event:Store;
+ } else if (type == RubyRequestType:ATOMIC) {
+ if (no_mig_atomic) {
+ return Event:Atomic;
+ } else {
+ return Event:Store;
+ }
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ AccessType cache_request_type_to_access_type(RubyRequestType type) {
+ if ((type == RubyRequestType:LD) || (type == RubyRequestType:IFETCH)) {
+ return AccessType:Read;
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
+ return AccessType:Write;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ // NOTE: direct local hits should not call this function
+ bool isExternalHit(Addr addr, MachineID sender) {
+ if (machineIDToMachineType(sender) == MachineType:L1Cache) {
+ return true;
+ } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
+
+ if (sender == mapAddressToRange(addr, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_num_bits, intToID(0))) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ return true;
+ }
+
+ bool okToIssueStarving(Addr addr, MachineID machineID) {
+ return persistentTable.okToIssueStarving(addr, machineID);
+ }
+
+ void markPersistentEntries(Addr addr) {
+ persistentTable.markEntries(addr);
+ }
+
+ void setExternalResponse(TBE tbe) {
+ assert(is_valid(tbe));
+ tbe.ExternalResponse := true;
+ }
+
+ bool IsAtomic(TBE tbe) {
+ assert(is_valid(tbe));
+ return tbe.IsAtomic;
+ }
+
+ // ** OUT_PORTS **
+ out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
+ out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
+ out_port(requestRecycle_out, RequestMsg, requestToL1Cache);
+
+ // ** IN_PORTS **
+
+ // Use Timer
+ in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ TBE tbe := L1_TBEs.lookup(readyAddress);
+
+ if (persistentTable.isLocked(readyAddress) &&
+ (persistentTable.findSmallest(readyAddress) != machineID)) {
+ if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
+ trigger(Event:Use_TimeoutStarverX, readyAddress,
+ getCacheEntry(readyAddress), tbe);
+ } else {
+ trigger(Event:Use_TimeoutStarverS, readyAddress,
+ getCacheEntry(readyAddress), tbe);
+ }
+ } else {
+ if (no_mig_atomic && IsAtomic(tbe)) {
+ trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
+ getCacheEntry(readyAddress), tbe);
+ } else {
+ trigger(Event:Use_TimeoutNoStarvers, readyAddress,
+ getCacheEntry(readyAddress), tbe);
+ }
+ }
+ }
+ }
+
+ // Reissue Timer
+ in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
+ L1_TBEs.lookup(addr));
+ }
+ }
+
+ // Persistent Network
+ in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
+ peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
+ assert(in_msg.Destination.isElement(machineID));
+
+ // Apply the lockdown or unlockdown message to the table
+ if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
+ } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
+ } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+ persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
+ } else {
+ error("Unexpected message");
+ }
+
+ // React to the message based on the current state of the table
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
+
+ if (persistentTable.isLocked(in_msg.addr)) {
+ if (persistentTable.findSmallest(in_msg.addr) == machineID) {
+ // Our Own Lock - this processor is highest priority
+ trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
+ cache_entry, tbe);
+ } else {
+ if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
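+            // "Last token" has two encodings under the owner-token
+            // convention: a single plain token, or the owner's minimum
+            // holding of (max_tokens() / 2) + 1.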
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
+ cache_entry, tbe);
+ } else {
+ trigger(Event:Persistent_GETS, in_msg.addr,
+ cache_entry, tbe);
+ }
+ } else {
+ trigger(Event:Persistent_GETX, in_msg.addr,
+ cache_entry, tbe);
+ }
+ }
+ } else {
+ // Unlock case - no entries in the table
+ trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
+ cache_entry, tbe);
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg, block_on="addr") {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
+
+ // Mark TBE flag if response received off-chip. Use this to update average latency estimate
+ if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
+
+ if (in_msg.Sender == mapAddressToRange(in_msg.addr,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0))) {
+
+ // came from an off-chip L2 cache
+ if (is_valid(tbe)) {
+ // L1_TBEs[in_msg.addr].ExternalResponse := true;
+ // profile_offchipL2_response(in_msg.addr);
+ }
+ }
+ else {
+ // profile_onchipL2_response(in_msg.addr );
+ }
+ } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:Directory ) {
+ if (is_valid(tbe)) {
+ setExternalResponse(tbe);
+ // profile_memory_response( in_msg.addr);
+ }
+ } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
+ //if (isLocalProcessor(machineID, in_msg.Sender) == false) {
+ //if (is_valid(tbe)) {
+ // tbe.ExternalResponse := true;
+ // profile_offchipL1_response(in_msg.addr );
+ //}
+ //}
+ //else {
+ // profile_onchipL1_response(in_msg.addr );
+ //}
+ } else {
+ error("unexpected SenderMachine");
+ }
+
+
+ if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+ trigger(Event:Data_Owner, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Data_Shared, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ } else {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+ }
+
+ // Request Network
+ in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg, block_on="addr") {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
+
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.isLocal) {
+ trigger(Event:Transient_Local_GETX, in_msg.addr,
+ cache_entry, tbe);
+ }
+ else {
+ trigger(Event:Transient_GETX, in_msg.addr,
+ cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ if (in_msg.isLocal) {
+ trigger(Event:Transient_Local_GETS_Last_Token, in_msg.addr,
+ cache_entry, tbe);
+ }
+ else {
+ trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
+ cache_entry, tbe);
+ }
+ }
+ else {
+ if (in_msg.isLocal) {
+ trigger(Event:Transient_Local_GETS, in_msg.addr,
+ cache_entry, tbe);
+ }
+ else {
+ trigger(Event:Transient_GETS, in_msg.addr,
+ cache_entry, tbe);
+ }
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+ // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
+
+ TBE tbe := L1_TBEs[in_msg.LineAddress];
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion.
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
+ } else {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The block is in the wrong L1, try to write it to the L2
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Dcache_entry, tbe);
+ }
+
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
+            // L1 doesn't have the line, but we have space for it in the L1
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
+ } else {
+ // No room in the L1, so we need to make room
+ trigger(Event:L1_Replacement,
+ L1Icache.cacheProbe(in_msg.LineAddress),
+ getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion.
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
+ } else {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The block is in the wrong L1, try to write it to the L2
+ trigger(Event:L1_Replacement, in_msg.LineAddress,
+ L1Icache_entry, tbe);
+ }
+
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
+            // L1 doesn't have the line, but we have space for it in the L1
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
+ } else {
+ // No room in the L1, so we need to make room
+ trigger(Event:L1_Replacement,
+ L1Dcache.cacheProbe(in_msg.LineAddress),
+ getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // ACTIONS
+
+ action(a_issueReadRequest, "a", desc="Issue GETS") {
+ assert(is_valid(tbe));
+ if (tbe.IssueCount == 0) {
+ // Update outstanding requests
+ //profile_outstanding_request(outstandingRequests);
+ outstandingRequests := outstandingRequests + 1;
+ }
+
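+    // Escalation policy: after retry_threshold plain issues, switch to a
+    // persistent request. With the default retry_threshold of 1, the
+    // first reissue already goes persistent.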
+ if (tbe.IssueCount >= retry_threshold) {
+ // Issue a persistent request if possible
+ if (okToIssueStarving(address, machineID) && (starving == false)) {
+ enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+            // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
+ }
+ markPersistentEntries(address);
+ starving := true;
+
+ if (tbe.IssueCount == 0) {
+ //profile_persistent_prediction(address, tbe.TypeOfAccess);
+ }
+
+ // Update outstanding requests
+ //profile_outstanding_persistent_request(outstandingPersistentRequests);
+ outstandingPersistentRequests := outstandingPersistentRequests + 1;
+
+ // Increment IssueCount
+ tbe.IssueCount := tbe.IssueCount + 1;
+
+ tbe.WentPersistent := true;
+
+        // Do not schedule a wakeup; a persistent request will always complete
+ }
+ else {
+
+ // We'd like to issue a persistent request, but are not allowed
+        // to issue a P.R. right now. Thus, we do not increment the
+ // IssueCount.
+
+ // Set a wakeup timer
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
+
+ }
+ } else {
+ // Make a normal request
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.RetryNum := tbe.IssueCount;
+ if (tbe.IssueCount == 0) {
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Reissue_Control;
+ }
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
+ }
+
+ // send to other local L1s, with local bit set
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ //
+        // Since there is only one chip, we assume all L1 caches are local
+ //
+ //out_msg.Destination := getOtherLocalL1IDs(machineID);
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.remove(machineID);
+
+ out_msg.RetryNum := tbe.IssueCount;
+ out_msg.isLocal := true;
+        // First issue or reissue, broadcasts use the same message size.
+        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
+ }
+
+ // Increment IssueCount
+ tbe.IssueCount := tbe.IssueCount + 1;
+
+ // Set a wakeup timer
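+      // With dynamic timeouts the reissue interval tracks the measured
+      // average miss latency (averageLatencyEstimate above); otherwise it
+      // falls back to the fixed_timeout_latency of 100 cycles by default.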
+
+ if (dynamic_timeout_enabled) {
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
+ } else {
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
+ }
+
+ }
+ }
+
+ action(b_issueWriteRequest, "b", desc="Issue GETX") {
+
+ assert(is_valid(tbe));
+ if (tbe.IssueCount == 0) {
+ // Update outstanding requests
+ //profile_outstanding_request(outstandingRequests);
+ outstandingRequests := outstandingRequests + 1;
+ }
+
+ if (tbe.IssueCount >= retry_threshold) {
+ // Issue a persistent request if possible
+ if ( okToIssueStarving(address, machineID) && (starving == false)) {
+ enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+            // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
+ }
+ markPersistentEntries(address);
+ starving := true;
+
+ // Update outstanding requests
+ //profile_outstanding_persistent_request(outstandingPersistentRequests);
+ outstandingPersistentRequests := outstandingPersistentRequests + 1;
+
+ if (tbe.IssueCount == 0) {
+ //profile_persistent_prediction(address, tbe.TypeOfAccess);
+ }
+
+ // Increment IssueCount
+ tbe.IssueCount := tbe.IssueCount + 1;
+
+ tbe.WentPersistent := true;
+
+        // Do not schedule a wakeup; a persistent request will always complete
+ }
+ else {
+
+ // We'd like to issue a persistent request, but are not allowed
+        // to issue a P.R. right now. Thus, we do not increment the
+ // IssueCount.
+
+ // Set a wakeup timer
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
+ }
+
+ } else {
+ // Make a normal request
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.RetryNum := tbe.IssueCount;
+
+ if (tbe.IssueCount == 0) {
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Reissue_Control;
+ }
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
+ }
+
+ // send to other local L1s too
+ enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.isLocal := true;
+
+ //
+      // Since there is only one chip, we assume all L1 caches are local
+ //
+ //out_msg.Destination := getOtherLocalL1IDs(machineID);
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.remove(machineID);
+
+ out_msg.RetryNum := tbe.IssueCount;
+      // First issue or reissue, broadcasts use the same message size.
+      out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.Prefetch := tbe.Prefetch;
+ out_msg.AccessMode := tbe.AccessMode;
+ }
+
+ // Increment IssueCount
+ tbe.IssueCount := tbe.IssueCount + 1;
+
+ DPRINTF(RubySlicc, "incremented issue count to %d\n",
+ tbe.IssueCount);
+
+ // Set a wakeup timer
+ if (dynamic_timeout_enabled) {
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
+ } else {
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
+ }
+ }
+ }
+
+ action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(c_ownedReplacement, "c", desc="Issue writeback") {
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Type := CoherenceResponseType:WB_OWNED;
+
+ // always send the data?
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
+
+ // don't send writeback if replacing block with no tokens
+ assert(is_valid(cache_entry));
+ assert (cache_entry.Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty == false);
+ out_msg.Dirty := false;
+
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // assert(cache_entry.Dirty == false);
+ out_msg.Dirty := false;
+
+ // always send the data?
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.Type := CoherenceResponseType:WB_TOKENS;
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+
+ action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
+ assert(is_valid(cache_entry));
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := 1;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Dirty := false;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+ cache_entry.Tokens := cache_entry.Tokens - 1;
+ assert(cache_entry.Tokens >= 1);
+ }
+
+  action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and N tokens (or one) from cache to requestor") {
+ assert(is_valid(cache_entry));
+ peek(requestNetwork_in, RequestMsg) {
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := N_tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Dirty := false;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ cache_entry.Tokens := cache_entry.Tokens - N_tokens;
+ }
+ else if (cache_entry.Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := 1;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ // out_msg.Dirty := cache_entry.Dirty;
+ out_msg.Dirty := false;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ cache_entry.Tokens := cache_entry.Tokens - 1;
+ }
+ }
+    // assert(cache_entry.Tokens >= 1);
+ }
+
+ action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ if (in_msg.isLocal) {
+ out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
+ // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+  action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all but N or one of our tokens to starver.") {
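+    // "AllButNorOne" reads as "all but N, or one": keep N_tokens if we
+    // hold more than that many (otherwise keep a single token) and send
+    // the rest to the starver.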
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ if (cache_entry.Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(cache_entry.Tokens >= 1);
+ if (cache_entry.Tokens > N_tokens) {
+ out_msg.Tokens := cache_entry.Tokens - N_tokens;
+ } else {
+ out_msg.Tokens := cache_entry.Tokens - 1;
+ }
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ if (cache_entry.Tokens > N_tokens) {
+ cache_entry.Tokens := N_tokens;
+ } else {
+ cache_entry.Tokens := 1;
+ }
+ }
+
+  action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all but N or one of our tokens to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ out_msg.Tokens := cache_entry.Tokens - N_tokens;
+ } else {
+ out_msg.Tokens := cache_entry.Tokens - 1;
+ }
+ assert(out_msg.Tokens > (max_tokens() / 2));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ cache_entry.Tokens := N_tokens;
+ } else {
+ cache_entry.Tokens := 1;
+ }
+ }
+
+ action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := cache_entry.Tokens;
+ assert(out_msg.Tokens > (max_tokens() / 2));
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
+ // assert(persistentTable.isLocked(address));
+
+ peek(responseNetwork_in, ResponseMsg) {
+ // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ // FIXME, should use a 3rd vnet in some cases
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+
+ L1Dcache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, false,
+ MachineType:L1Cache);
+ }
+
+ action(h_ifetch_hit, "hi", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+
+ L1Icache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, false,
+ MachineType:L1Cache);
+ }
+
+ action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ peek(responseNetwork_in, ResponseMsg) {
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.readCallback(address, cache_entry.DataBlk,
+ isExternalHit(address, in_msg.Sender),
+ machineIDToMachineType(in_msg.Sender));
+ }
+ }
+
+ action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+
+ L1Dcache.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk, false,
+ MachineType:L1Cache);
+ cache_entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+
+ action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
+ address, cache_entry.DataBlk);
+ peek(responseNetwork_in, ResponseMsg) {
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.writeCallback(address, cache_entry.DataBlk,
+ isExternalHit(address, in_msg.Sender),
+ machineIDToMachineType(in_msg.Sender));
+ }
+ cache_entry.Dirty := true;
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE") {
+ check_allocate(L1_TBEs);
+ L1_TBEs.allocate(address);
+ set_tbe(L1_TBEs[address]);
+ tbe.IssueCount := 0;
+ peek(mandatoryQueue_in, RubyRequest) {
+ tbe.PC := in_msg.ProgramCounter;
+ tbe.TypeOfAccess := cache_request_type_to_access_type(in_msg.Type);
+ if (in_msg.Type == RubyRequestType:ATOMIC) {
+ tbe.IsAtomic := true;
+ }
+ tbe.Prefetch := in_msg.Prefetch;
+ tbe.AccessMode := in_msg.AccessMode;
+ }
+ tbe.IssueTime := curCycle();
+ }
+
+ action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
+ }
+ }
+
+ action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
+ if (reissueTimerTable.isSet(address)) {
+ reissueTimerTable.unset(address);
+ }
+ }
+
+ action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
+ useTimerTable.unset(address);
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+ action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
+ persistentNetwork_in.dequeue(clockEdge());
+ }
+
+ action(m_popRequestQueue, "m", desc="Pop request queue.") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
+ useTimerTable.set(
+ address, clockEdge() + cyclesToTicks(use_timeout_latency));
+ }
+
+ action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:INV;
+ out_msg.Tokens := 0;
+ out_msg.Sender := machineID;
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+
+ action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ assert(in_msg.Tokens != 0);
+ DPRINTF(RubySlicc, "L1 received tokens for address: %#x, tokens: %d\n",
+ in_msg.addr, in_msg.Tokens);
+ cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
+ DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);
+
+ if (cache_entry.Dirty == false && in_msg.Dirty) {
+ cache_entry.Dirty := true;
+ }
+ }
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+
+ assert(is_valid(tbe));
+ if (tbe.WentPersistent) {
+ // assert(starving);
+ outstandingRequests := outstandingRequests - 1;
+ enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
+ out_msg.addr := address;
+ out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+      // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ }
+ starving := false;
+ }
+
+ // Update average latency
+ if (tbe.IssueCount <= 1) {
+ if (tbe.ExternalResponse) {
+ updateAverageLatencyEstimate(curCycle() - tbe.IssueTime);
+ }
+ }
+
+ // Profile
+ //if (tbe.WentPersistent) {
+ // profile_token_retry(address, tbe.TypeOfAccess, 2);
+ //}
+ //else {
+ // profile_token_retry(address, tbe.TypeOfAccess, 1);
+ //}
+
+ //profile_token_retry(address, tbe.TypeOfAccess, tbe.IssueCount);
+ L1_TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
+ out_msg.addr := address;
+ if (cache_entry.Tokens > (max_tokens() / 2)) {
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if (cache_entry.Dirty == false && in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+
+ }
+ }
+
+ action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ assert(getTokens(cache_entry) == 0);
+ if (L1Dcache.isTagPresent(address)) {
+ L1Dcache.deallocate(address);
+ } else {
+ L1Icache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+    if (is_valid(cache_entry) == false) {
+      set_cache_entry(L1Dcache.allocate(address, new Entry));
+    }
+  }
+
+  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
+    if (is_valid(cache_entry) == false) {
+      set_cache_entry(L1Icache.allocate(address, new Entry));
+    }
+  }
+
+ action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1Icache.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1Dcache.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1Dcache.demand_hits;
+ }
+
+ action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
+ }
+ }
+
+  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the head of the mandatory queue on this address until woken.") {
+ peek(mandatoryQueue_in, RubyRequest) {
+ APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
+ }
+ stall_and_wait(mandatoryQueue_in, address);
+ }
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
+ action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
+ transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
+ ta_traceStalledAddress;
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, {Store, Atomic}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ // Lockdowns
+ transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from NP
+ transition(NP, Load, IS) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, Ifetch, IS) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileInstMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, {Store, Atomic}, IM) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
+ bb_bounceResponse;
+ n_popResponseQueue;
+ }
+
+ transition(NP, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition(NP, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from Idle
+ transition(I, Load, IS) {
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, Ifetch, IS) {
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileInstMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, {Store, Atomic}, IM) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, L1_Replacement) {
+ ta_traceStalledAddress;
+ tr_tokenReplacement;
+ gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(I, {Transient_GETX, Transient_Local_GETX}) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition(I, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition(I, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(I_L, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}) {
+ l_popPersistentQueue;
+ }
+
+ transition(I, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_All_Tokens, M) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Shared
+ transition({S, SM, S_L, SM_L}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, S_L, SM_L}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, {Store, Atomic}, SM) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L1_Replacement, I) {
+ ta_traceStalledAddress;
+ cc_sharedReplacement; // Only needed in some cases
+ forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
+ t_sendAckWithCollectedTokens;
+ p_informL2AboutTokenLoss;
+    forward_eviction_to_cpu;
+ m_popRequestQueue;
+ }
+
+ // only owner responds to non-local requests
+ transition(S, Transient_GETS) {
+ m_popRequestQueue;
+ }
+
+ transition(S, Transient_Local_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(S, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
+ m_popRequestQueue;
+ }
+
+ transition({S, S_L}, Persistent_GETX, I_L) {
+ e_sendAckWithCollectedTokens;
+ p_informL2AboutTokenLoss;
+    forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
+ f_sendAckWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
+ l_popPersistentQueue;
+ }
+
+ transition(S, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Owner, O) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Owned
+ transition({O, OM}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({O, OM}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, {Store, Atomic}, OM) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, L1_Replacement, I) {
+ ta_traceStalledAddress;
+ c_ownedReplacement;
+    forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
+ dd_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+    forward_eviction_to_cpu;
+ m_popRequestQueue;
+ }
+
+ transition(O, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+    forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Persistent_GETS_Last_Token, I_L) {
+ fo_sendDataWithOwnerToken;
+    forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Transient_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(O, Transient_Local_GETS) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ // ran out of tokens, wait for it to go persistent
+ transition(O, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
+ m_popRequestQueue;
+ }
+
+ transition(O, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Ack_All_Tokens, M) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Modified
+ transition({MM, MM_W}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, MM_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM_W}, {Store, Atomic}) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, Store) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, Atomic, M) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, L1_Replacement, I) {
+ ta_traceStalledAddress;
+ c_ownedReplacement;
+    forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
+ dd_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+    forward_eviction_to_cpu;
+ m_popRequestQueue;
+ }
+
+ transition({MM_W}, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
+ m_popRequestQueue;
+ }
+
+ // Implement the migratory sharing optimization, even for persistent requests
+ transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+    forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ // ignore persistent requests in lockout period
+ transition(MM_W, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(MM_W, Use_TimeoutNoStarvers, MM) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ kd_wakeUpDependents;
+ }
+
+ transition(MM_W, Use_TimeoutNoStarvers_NoMig, M) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from Dirty Exclusive
+ transition({M, M_W}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({M, M_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Store, MM) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, Atomic) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Store, MM_W) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Atomic) {
+ hh_store_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M, L1_Replacement, I) {
+ ta_traceStalledAddress;
+ c_ownedReplacement;
+ forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
+ dd_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ forward_eviction_to_cpu;
+ m_popRequestQueue;
+ }
+
+ transition(M, Transient_Local_GETS, O) {
+ d_sendDataWithToken;
+ m_popRequestQueue;
+ }
+
+ transition(M, Transient_GETS, O) {
+ d_sendDataWithNTokenIfAvail;
+ m_popRequestQueue;
+ }
+
+ transition(M_W, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
+ m_popRequestQueue;
+ }
+
+ transition(M, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ p_informL2AboutTokenLoss;
+ forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ transition(M, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ // ignore persistent requests in lockout period
+ transition(M_W, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(M_W, Use_TimeoutStarverS, S_L) {
+ s_deallocateTBE;
+ ff_sendDataWithAllButNorOneTokens;
+ jj_unsetUseTimer;
+ }
+
+ // someone unlocked during timeout
+ transition(M_W, {Use_TimeoutNoStarvers, Use_TimeoutNoStarvers_NoMig}, M) {
+ s_deallocateTBE;
+ jj_unsetUseTimer;
+ kd_wakeUpDependents;
+ }
+
+ transition(M_W, Use_TimeoutStarverX, I_L) {
+ s_deallocateTBE;
+ ee_sendDataWithAllTokens;
+ forward_eviction_to_cpu;
+ p_informL2AboutTokenLoss;
+ jj_unsetUseTimer;
+ }
+
+ // migratory block: a starver appeared during the lockout, so hand over data and all tokens
+ transition(MM_W, {Use_TimeoutStarverX, Use_TimeoutStarverS}, I_L) {
+ s_deallocateTBE;
+ ee_sendDataWithAllTokens;
+ forward_eviction_to_cpu;
+ p_informL2AboutTokenLoss;
+ jj_unsetUseTimer;
+ }
+
+ // Transient_GETX and Transient_GETS in transient states
+ transition(OM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue; // Even if we have the data, we can pretend we don't have it yet.
+ }
+
+ transition(IS, {Transient_GETX, Transient_Local_GETX}) {
+ t_sendAckWithCollectedTokens;
+ m_popRequestQueue;
+ }
+
+ transition(IS, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition(IS, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IS_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition(IM, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IM_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+
+ transition({SM, SM_L}, Persistent_GETX, IM_L) {
+ e_sendAckWithCollectedTokens;
+ forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ transition(SM, {Persistent_GETS, Persistent_GETS_Last_Token}, SM_L) {
+ f_sendAckWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(SM_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETX, IM_L) {
+ ee_sendDataWithAllTokens;
+ forward_eviction_to_cpu;
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETS, SM_L) {
+ ff_sendDataWithAllButNorOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(OM, Persistent_GETS_Last_Token, IM_L) {
+ fo_sendDataWithOwnerToken;
+ l_popPersistentQueue;
+ }
+
+ // Transitions from IM/SM
+
+ transition({IM, SM}, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_Shared, SM) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_Owner, OM) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data_All_Tokens, MM_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(SM, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_Owner, OM) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(SM, Data_All_Tokens, MM_W) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) { // We don't have the data yet, but we might have collected some tokens. We give them up here to avoid livelock
+ t_sendAckWithCollectedTokens;
+ forward_eviction_to_cpu;
+ m_popRequestQueue;
+ }
+
+ transition({IM, SM}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
+ m_popRequestQueue;
+ }
+
+ transition({IM, SM}, Request_Timeout) {
+ j_unsetReissueTimer;
+ b_issueWriteRequest;
+ }
+
+ // Transitions from OM
+
+ transition(OM, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Ack_All_Tokens, MM_W) {
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(OM, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(OM, Data_All_Tokens, MM_W) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(OM, Request_Timeout) {
+ j_unsetReissueTimer;
+ b_issueWriteRequest;
+ }
+
+ // Transitions from IS
+
+ transition(IS, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Data_All_Tokens, M_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ o_scheduleUseTimeout;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueReadRequest;
+ }
+
+ // Transitions from I_L
+
+ transition(I_L, Load, IS_L) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I_L, Ifetch, IS_L) {
+ pp_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueReadRequest;
+ uu_profileInstMiss;
+ k_popMandatoryQueue;
+ }
+
+ transition(I_L, {Store, Atomic}, IM_L) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+
+ // Transitions from S_L
+
+ transition(S_L, {Store, Atomic}, SM_L) {
+ i_allocateTBE;
+ b_issueWriteRequest;
+ uu_profileDataMiss;
+ k_popMandatoryQueue;
+ }
+
+ // Other transitions from *_L states
+
+ transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS, Transient_GETX, Transient_Local_GETX}) {
+ m_popRequestQueue;
+ }
+
+ transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, S_L}, Data_All_Tokens) {
+ g_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Request_Timeout) {
+ j_unsetReissueTimer;
+ a_issueReadRequest;
+ }
+
+ transition({IM_L, SM_L}, Request_Timeout) {
+ j_unsetReissueTimer;
+ b_issueWriteRequest;
+ }
+
+ // Opportunistically complete the memory operation in the following
+ // cases. Note: these transitions could just use
+ // g_bounceResponseToStarver, but if we have the data and tokens, we
+ // might as well complete the memory request while we have the
+ // chance (and then immediately forward on the data)
+
+ transition(IM_L, Data_All_Tokens, MM_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ j_unsetReissueTimer;
+ o_scheduleUseTimeout;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(SM_L, Data_All_Tokens, S_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ xx_external_store_hit;
+ ff_sendDataWithAllButNorOneTokens;
+ s_deallocateTBE;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Data_Shared, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ s_deallocateTBE;
+ e_sendAckWithCollectedTokens;
+ p_informL2AboutTokenLoss;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Data_Owner, I_L) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ ee_sendDataWithAllTokens;
+ s_deallocateTBE;
+ p_informL2AboutTokenLoss;
+ j_unsetReissueTimer;
+ n_popResponseQueue;
+ }
+
+ transition(IS_L, Data_All_Tokens, M_W) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ x_external_load_hit;
+ j_unsetReissueTimer;
+ o_scheduleUseTimeout;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Own_Lock_or_Unlock
+
+ transition(I_L, Own_Lock_or_Unlock, I) {
+ l_popPersistentQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(S_L, Own_Lock_or_Unlock, S) {
+ l_popPersistentQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IM_L, Own_Lock_or_Unlock, IM) {
+ l_popPersistentQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS_L, Own_Lock_or_Unlock, IS) {
+ l_popPersistentQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(SM_L, Own_Lock_or_Unlock, SM) {
+ l_popPersistentQueue;
+ kd_wakeUpDependents;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:L2Cache, "Token protocol")
+ : CacheMemory * L2cache;
+ int N_tokens;
+ Cycles l2_request_latency := 5;
+ Cycles l2_response_latency := 5;
+ bool filtering_enabled := "True";
+
+ // L2 BANK QUEUES
+ // From local bank of L2 cache TO the network
+
+ // this L2 bank -> a local L1 || mod-directory
+ MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
+ vnet_type="response";
+ // this L2 bank -> mod-directory
+ MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
+ vnet_type="request";
+ // this L2 bank -> a local L1
+ MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
+ vnet_type="request";
+
+
+ // FROM the network to this local bank of L2 cache
+
+ // a local L1 || mod-directory -> this L2 bank
+ MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
+ vnet_type="response";
+ MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
+ vnet_type="persistent";
+ // mod-directory -> this L2 bank
+ MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
+ vnet_type="request";
+ // a local L1 -> this L2 bank
+ MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
+ vnet_type="request";
+
+{
+ // STATES
+ state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
+ // Base states
+ NP, AccessPermission:Invalid, desc="Not Present";
+ I, AccessPermission:Invalid, desc="Idle";
+ S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
+ O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
+ M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
+
+ // Locked states
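+ // (another processor holds an active persistent request for the block,
+ // so arriving tokens and data are bounced to the starving node)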
+ I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
+ S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
+ }
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+
+ // Requests
+ L1_GETS, desc="local L1 GETS request";
+ L1_GETS_Last_Token, desc="local L1 GETS request for the last token";
+ L1_GETX, desc="local L1 GETX request";
+ L1_INV, desc="L1 no longer has tokens";
+ Transient_GETX, desc="A GetX from another processor";
+ Transient_GETS, desc="A GetS from another processor";
+ Transient_GETS_Last_Token, desc="A GetS from another processor when only the last token remains";
+
+ // events initiated by this L2
+ L2_Replacement, desc="L2 Replacement", format="!r";
+
+ // events of external L2 responses
+
+ // Responses
+ Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
+ Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
+ Writeback_All_Tokens, desc="Received a writeback from L1 carrying all the tokens";
+ Writeback_Owned, desc="Received a writeback from L1 carrying the owner token";
+
+
+ Data_Shared, desc="Received a data message, we are now a sharer";
+ Data_Owner, desc="Received a data message, we are now the owner";
+ Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
+ Ack, desc="Received an ack message";
+ Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
+
+ // Lock/Unlock
+ Persistent_GETX, desc="Another processor has priority to read/write";
+ Persistent_GETS, desc="Another processor has priority to read";
+ Persistent_GETS_Last_Token, desc="Another processor has priority to read; only the last token remains";
+ Own_Lock_or_Unlock, desc="This processor now has priority";
+ }
+
+ // TYPES
+
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different from memory)?";
+ int Tokens, desc="The number of tokens we're holding for the line";
+ DataBlock DataBlk, desc="data for the block";
+ }
+
+ structure(DirEntry, desc="...", interface="AbstractEntry") {
+ Set Sharers, desc="Set of the internal processors that want the block in shared state";
+ bool exclusive, default="false", desc="if local exclusive is likely";
+ }
+
+ structure(PerfectCacheMemory, external="yes") {
+ void allocate(Addr);
+ void deallocate(Addr);
+ DirEntry lookup(Addr);
+ bool isTagPresent(Addr);
+ }
+
+ structure(PersistentTable, external="yes") {
+ void persistentRequestLock(Addr, MachineID, AccessType);
+ void persistentRequestUnlock(Addr, MachineID);
+ MachineID findSmallest(Addr);
+ AccessType typeOfSmallest(Addr);
+ void markEntries(Addr);
+ bool isLocked(Addr);
+ int countStarvingForAddress(Addr);
+ int countReadStarvingForAddress(Addr);
+ }
+
+ PersistentTable persistentTable;
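+ // Tracks which local L1s may hold each block; consulted as a snoop
+ // filter by j_forwardTransientRequestToLocalSharers before forwarding.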
+ PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+
+ Tick clockEdge();
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
+ return cache_entry;
+ }
+
+ DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
+ return localDirectory.lookup(address);
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ int getTokens(Entry cache_entry) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.Tokens;
+ } else {
+ return 0;
+ }
+ }
+
+ State getState(Entry cache_entry, Addr addr) {
+ if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ } else if (persistentTable.isLocked(addr)) {
+ return State:I_L;
+ } else {
+ return State:NP;
+ }
+ }
+
+ void setState(Entry cache_entry, Addr addr, State state) {
+
+ if (is_valid(cache_entry)) {
+ // Make sure the token count is in range
+ assert(cache_entry.Tokens >= 0);
+ assert(cache_entry.Tokens <= max_tokens());
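+ // A count of exactly half the tokens is unrepresentable by construction:
+ // the owner token is counted as max_tokens()/2 plain tokens, so any legal
+ // count falls strictly below or strictly above the halfway mark.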
+ assert(cache_entry.Tokens != (max_tokens() / 2));
+
+ // Make sure we have no tokens in the locked state
+ if (state == State:I_L) {
+ assert(cache_entry.Tokens == 0);
+ }
+
+ // in M you have all the tokens
+ if (state == State:M) {
+ assert(cache_entry.Tokens == max_tokens());
+ }
+
+ // in NP you have no tokens
+ if (state == State:NP) {
+ assert(cache_entry.Tokens == 0);
+ }
+
+ // You have at least one token in S
+ if (state == State:S) {
+ assert(cache_entry.Tokens > 0);
+ }
+
+ // You have more than half the tokens in O
+ if (state == State:O) {
+ assert(cache_entry.Tokens > (max_tokens() / 2));
+ }
+
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ Entry cache_entry := getCacheEntry(addr);
+ if (is_valid(cache_entry)) {
+ return L2Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L2Cache_State_to_permission(state));
+ }
+ }
+
+ void removeSharer(Addr addr, NodeID id) {
+
+ if (localDirectory.isTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.remove(id);
+ if (dir_entry.Sharers.count() == 0) {
+ localDirectory.deallocate(addr);
+ }
+ }
+ }
+
+ bool sharersExist(Addr addr) {
+ if (localDirectory.isTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ return (dir_entry.Sharers.count() > 0);
+ }
+ return false;
+ }
+
+ bool exclusiveExists(Addr addr) {
+ if (localDirectory.isTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.exclusive;
+ }
+ return false;
+ }
+
+ // assumes that caller will check to make sure tag is present
+ Set getSharers(Addr addr) {
+ DirEntry dir_entry := getDirEntry(addr);
+ return dir_entry.Sharers;
+ }
+
+ void setNewWriter(Addr addr, NodeID id) {
+ if (localDirectory.isTagPresent(addr) == false) {
+ localDirectory.allocate(addr);
+ }
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.clear();
+ dir_entry.Sharers.add(id);
+ dir_entry.exclusive := true;
+ }
+
+ void addNewSharer(Addr addr, NodeID id) {
+ if (localDirectory.isTagPresent(addr) == false) {
+ localDirectory.allocate(addr);
+ }
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.Sharers.add(id);
+ // dir_entry.exclusive := false;
+ }
+
+ void clearExclusiveBitIfExists(Addr addr) {
+ if (localDirectory.isTagPresent(addr)) {
+ DirEntry dir_entry := getDirEntry(addr);
+ dir_entry.exclusive := false;
+ }
+ }
+
+ // ** OUT_PORTS **
+ out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
+ out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
+
+
+
+ // ** IN_PORTS **
+
+ // Persistent Network
+ in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
+ peek(persistentNetwork_in, PersistentMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+
+ if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
+ } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
+ } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+ persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
+ } else {
+ error("Unexpected message");
+ }
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ // React to the message based on the current state of the table
+ if (persistentTable.isLocked(in_msg.addr)) {
+
+ if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
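+ // Holding a single plain token, or the owner token (max_tokens()/2)
+ // plus exactly one, counts as holding the last usable token here.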
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
+ cache_entry);
+ } else {
+ trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
+ }
+ } else {
+ trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
+ }
+ }
+ else {
+ trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
+ }
+ }
+ }
+ }
+
+
+ // Request Network
+ in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (getTokens(cache_entry) == 1) {
+ trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
+ cache_entry);
+ }
+ else {
+ trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:L1_GETX, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (getTokens(cache_entry) == 1 ||
+ getTokens(cache_entry) == (max_tokens() / 2) + 1) {
+ trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
+ }
+ else {
+ trigger(Event:L1_GETS, in_msg.addr, cache_entry);
+ }
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+
+ // Response Network
+ in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+
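+ // If the arriving tokens complete the full set, trigger the *_All_Tokens
+ // events instead. A plain ACK carries fewer than half the tokens, so it
+ // can never deliver the owner token by itself.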
+ if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+ trigger(Event:Data_Owner, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_Shared, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
+ in_msg.Type == CoherenceResponseType:WB_OWNED ||
+ in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+
+ if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
+
+ // either room is available or the block is already present
+
+ if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
+ assert(in_msg.Dirty == false);
+ trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ assert(in_msg.Dirty == false);
+ trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
+ }
+ else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
+ //assert(in_msg.Dirty == false);
+ trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
+ }
+ }
+ else {
+ trigger(Event:L2_Replacement,
+ L2cache.cacheProbe(in_msg.addr),
+ getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
+ }
+ } else if (in_msg.Type == CoherenceResponseType:INV) {
+ trigger(Event:L1_INV, in_msg.addr, cache_entry);
+ } else {
+ error("Unexpected message");
+ }
+ } else {
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
+ in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
+ in_msg.Type == CoherenceResponseType:WB_OWNED ||
+ in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
+
+ // either room is available or the block is already present
+
+ if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
+ assert(in_msg.Dirty == false);
+ assert( (getState(cache_entry, in_msg.addr) != State:NP)
+ && (getState(cache_entry, in_msg.addr) != State:I) );
+ trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
+ } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ assert(in_msg.Dirty == false);
+ trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
+ }
+ else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
+ trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
+ }
+ }
+ else {
+ trigger(Event:L2_Replacement,
+ L2cache.cacheProbe(in_msg.addr),
+ getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
+ }
+ } else if (in_msg.Type == CoherenceResponseType:INV) {
+ trigger(Event:L1_INV, in_msg.addr, cache_entry);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+ }
+
+
+ // ACTIONS
+
+ action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
+
+ peek(L1requestNetwork_in, RequestMsg) {
+
+ // if this is a retry or no local sharers, broadcast normally
+ enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.RetryNum := in_msg.RetryNum;
+
+ //
+ // If a statically shared L2 cache, then no other L2 caches can
+ // store the block
+ //
+ //out_msg.Destination.broadcast(MachineType:L2Cache);
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+ //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ out_msg.Prefetch := in_msg.Prefetch;
+ } //enqueue
+
+ //profile_filter_action(0);
+ } // peek
+ } //action
+
+
+ action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(c_cleanReplacement, "c", desc="Issue clean writeback") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ cache_entry.Tokens := 0;
+ }
+ }
+
+ action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+
+ if (cache_entry.Dirty) {
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ } else {
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ out_msg.Type := CoherenceResponseType:ACK_OWNER;
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(d_sendDataWithTokens, "d", desc="Send data and one or more tokens from cache to requestor") {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
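+ // Hand out N_tokens at once only if we would still hold more than half
+ // the tokens (i.e. the owner token) afterwards; otherwise part with one.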
+ if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := N_tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := cache_entry.Tokens - N_tokens;
+ }
+ else {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := 1;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := cache_entry.Tokens - 1;
+ }
+ }
+ }
+
+ action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
+ assert(is_valid(cache_entry));
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
+ assert(is_valid(cache_entry));
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ if (cache_entry.Tokens > 1) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens - 1;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ cache_entry.Tokens := 1;
+ }
+
+ action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := cache_entry.Tokens - 1;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 1;
+ }
+
+ action(fa_sendDataWithAllTokens, "fa", desc="Send data and all our tokens to starver") {
+ //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ cache_entry.Tokens := 0;
+ }
+
+
+
+ action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
+ // assert(persistentTable.isLocked(address));
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet in some cases
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
+ //assert(persistentTable.isLocked(address));
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet in some cases
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ } else {
+ assert(in_msg.Tokens < (max_tokens() / 2));
+ out_msg.Type := CoherenceResponseType:ACK;
+ }
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+ action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
+ // assert(persistentTable.isLocked(address));
+ peek(responseNetwork_in, ResponseMsg) {
+ // FIXME, should use a 3rd vnet in some cases
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := in_msg.MessageSize;
+ }
+ }
+ }
+
+
+ action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received L1 hint or writeback") {
+ peek(responseNetwork_in, ResponseMsg) {
+ removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
+ }
+ }
+
+ action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
+ peek(requestNetwork_in, RequestMsg) {
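+ // With filtering enabled, a first-attempt request (RetryNum == 0) is
+ // dropped when no local sharer is recorded, since no local L1 should
+ // hold the block. Retries are always forwarded so a stale filter
+ // cannot block forward progress.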
+ if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
+ //profile_filter_action(1);
+ DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
+ in_msg.RetryNum);
+ }
+ else {
+ enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
+ out_msg.addr := in_msg.addr;
+ out_msg.Requestor := in_msg.Requestor;
+
+ //
+ // Currently assuming only one chip so all L1s are local
+ //
+ //out_msg.Destination := getLocalL1IDs(machineID);
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.remove(in_msg.Requestor);
+
+ out_msg.Type := in_msg.Type;
+ out_msg.isLocal := false;
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.AccessMode := in_msg.AccessMode;
+ out_msg.Prefetch := in_msg.Prefetch;
+ }
+ //profile_filter_action(0);
+ }
+ }
+ }
+
+ action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens > 0);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+ out_msg.Tokens := 1;
+ }
+ cache_entry.Tokens := cache_entry.Tokens - 1;
+ }
+ }
+
+ action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+ out_msg.Tokens := cache_entry.Tokens;
+ }
+ cache_entry.Tokens := 0;
+ }
+ }
+
+ action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+// assert(cache_entry.Tokens == max_tokens());
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
+ //out_msg.Tokens := max_tokens();
+ out_msg.Tokens := cache_entry.Tokens;
+ }
+ cache_entry.Tokens := 0;
+ }
+ }
+
+ action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
+ persistentNetwork_in.dequeue(clockEdge());
+ }
+
+ action(m_popRequestQueue, "m", desc="Pop request queue.") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
+ L1requestNetwork_in.dequeue(clockEdge());
+ }
+
+
+ action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ assert(in_msg.Tokens != 0);
+ cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
+
+ // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
+ // may not trigger this action.
+ if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
+ cache_entry.Dirty := true;
+ }
+ }
+ }
+
+ action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
+ peek(L1requestNetwork_in, RequestMsg) {
+ if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
+ if (in_msg.Type == CoherenceRequestType:GETX) {
+ setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
+ }
+ }
+ }
+ }
+
+ action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
+ clearExclusiveBitIfExists(address);
+ }
+
+ action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
+ peek(L1requestNetwork_in, RequestMsg) {
+ if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
+ (is_valid(cache_entry))) {
+ L2cache.setMRU(address);
+ }
+ }
+ }
+
+ action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
+ assert(is_valid(cache_entry));
+ if (cache_entry.Tokens > 0) {
+ peek(L1requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ assert(cache_entry.Tokens >= 1);
+ out_msg.Tokens := cache_entry.Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ cache_entry.Tokens := 0;
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ if ((cache_entry.Dirty == false) && in_msg.Dirty) {
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(vv_allocateL2CacheBlock, "\v", desc="Allocate an L2 cache block for this address") {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cache.deallocate(address);
+ unset_cache_entry();
+ }
+
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cache.demand_hits;
+ }
+
+ action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
+ peek(responseNetwork_in, ResponseMsg) {
+ if (in_msg.Type != CoherenceResponseType:ACK &&
+ in_msg.Type != CoherenceResponseType:WB_TOKENS) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
+ }
+ }
+ }
+
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
+ l_popPersistentQueue;
+ }
+
+
+ // Transitions from NP
+
+ transition(NP, {Transient_GETX, Transient_GETS}) {
+ // forward message to local sharers
+ r_clearExclusive;
+ j_forwardTransientRequestToLocalSharers;
+ m_popRequestQueue;
+ }
+
+
+ transition(NP, {L1_GETS, L1_GETX}) {
+ a_broadcastLocalRequest;
+ r_markNewSharer;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
+ bb_bounceResponse;
+ n_popResponseQueue;
+ }
+
+ transition(NP, Writeback_Shared_Data, S) {
+ vv_allocateL2CacheBlock;
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(NP, Writeback_Tokens, I) {
+ vv_allocateL2CacheBlock;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(NP, Writeback_All_Tokens, M) {
+ vv_allocateL2CacheBlock;
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(NP, Writeback_Owned, O) {
+ vv_allocateL2CacheBlock;
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+
+ transition(NP,
+ {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
+ I_L) {
+ l_popPersistentQueue;
+ }
+
+ // Transitions from Idle
+
+ transition(I, {L1_GETS, L1_GETS_Last_Token}) {
+ a_broadcastLocalRequest;
+ tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
+ r_markNewSharer;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(I, L1_GETX) {
+ a_broadcastLocalRequest;
+ tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
+ r_markNewSharer;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(I, L2_Replacement) {
+ c_cleanReplacement; // Only needed in some cases
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
+ r_clearExclusive;
+ t_sendAckWithCollectedTokens;
+ j_forwardTransientRequestToLocalSharers;
+ m_popRequestQueue;
+ }
+
+ transition(I,
+ {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
+ I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+
+ transition(I, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Shared, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Writeback_Shared_Data, S) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(I, Writeback_Tokens) {
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_Owner, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(I, Writeback_Owned, O) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(I, Data_All_Tokens, M) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+
+ transition(I, Writeback_All_Tokens, M) {
+ u_writeDataToCache;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ // Transitions from Shared
+
+ transition(S, L2_Replacement, I) {
+ c_cleanReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(S, Transient_GETX, I) {
+ r_clearExclusive;
+ t_sendAckWithCollectedTokens;
+ j_forwardTransientRequestToLocalSharers;
+ m_popRequestQueue;
+ }
+
+ transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
+ j_forwardTransientRequestToLocalSharers;
+ r_clearExclusive;
+ m_popRequestQueue;
+ }
+
+ transition(S, Persistent_GETX, I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+
+ transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
+ f_sendAckWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+
+ transition(S, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Writeback_Tokens) {
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(S, Writeback_Shared_Data) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+
+ transition(S, Data_Owner, O) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Writeback_Owned, O) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(S, Data_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(S, Writeback_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(S, L1_GETX, I) {
+ a_broadcastLocalRequest;
+ tt_sendLocalAckWithCollectedTokens;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+
+ transition(S, L1_GETS) {
+ k_dataFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(S, L1_GETS_Last_Token, I) {
+ k_dataFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ // Transitions from Owned
+
+ transition(O, L2_Replacement, I) {
+ cc_dirtyReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(O, Transient_GETX, I) {
+ r_clearExclusive;
+ dd_sendDataWithAllTokens;
+ j_forwardTransientRequestToLocalSharers;
+ m_popRequestQueue;
+ }
+
+ transition(O, Persistent_GETX, I_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Persistent_GETS, S_L) {
+ ff_sendDataWithAllButOneTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Persistent_GETS_Last_Token, I_L) {
+ fa_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(O, Transient_GETS) {
+ // send multiple tokens
+ r_clearExclusive;
+ d_sendDataWithTokens;
+ m_popRequestQueue;
+ }
+
+ transition(O, Transient_GETS_Last_Token) {
+ // wait for the request to go persistent
+ r_clearExclusive;
+ m_popRequestQueue;
+ }
+
+ transition(O, Ack) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Ack_All_Tokens, M) {
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_Shared) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+
+ transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(O, Data_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ n_popResponseQueue;
+ }
+
+ transition(O, Writeback_All_Tokens, M) {
+ w_assertIncomingDataAndCacheDataMatch;
+ q_updateTokensFromResponse;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(O, L1_GETS) {
+ k_dataFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(O, L1_GETS_Last_Token, I) {
+ k_dataOwnerFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(O, L1_GETX, I) {
+ a_broadcastLocalRequest;
+ k_dataAndAllTokensFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ // Transitions from M
+
+ transition(M, L2_Replacement, I) {
+ cc_dirtyReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ // MRM_DEBUG: Give up all tokens even for GETS? ???
+ transition(M, {Transient_GETX, Transient_GETS}, I) {
+ r_clearExclusive;
+ dd_sendDataWithAllTokens;
+ m_popRequestQueue;
+ }
+
+ transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
+ ee_sendDataWithAllTokens;
+ l_popPersistentQueue;
+ }
+
+
+ transition(M, L1_GETS, O) {
+ k_dataFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(M, L1_GETX, I) {
+ k_dataAndAllTokensFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+
+ // Transitions from locked states
+
+ transition({I_L, S_L}, Ack) {
+ gg_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
+ gg_bounceResponseToStarver;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
+ gg_bounceWBSharedToStarver;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
+ gg_bounceWBOwnedToStarver;
+ h_updateFilterFromL1HintOrWB;
+ n_popResponseQueue;
+ }
+
+ transition(S_L, L2_Replacement, I) {
+ c_cleanReplacement;
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(I_L, L2_Replacement, I) {
+ rr_deallocateL2CacheBlock;
+ }
+
+ transition(I_L, Own_Lock_or_Unlock, I) {
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, Own_Lock_or_Unlock, S) {
+ l_popPersistentQueue;
+ }
+
+ transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
+ r_clearExclusive;
+ m_popRequestQueue;
+ }
+
+ transition(I_L, {L1_GETX, L1_GETS}) {
+ a_broadcastLocalRequest;
+ r_markNewSharer;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(S_L, L1_GETX, I_L) {
+ a_broadcastLocalRequest;
+ tt_sendLocalAckWithCollectedTokens;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileMiss;
+ o_popL1RequestQueue;
+ }
+
+ transition(S_L, L1_GETS) {
+ k_dataFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(S_L, L1_GETS_Last_Token, I_L) {
+ k_dataFromL2CacheToL1Requestor;
+ r_markNewSharer;
+ r_setMRU;
+ uu_profileHit;
+ o_popL1RequestQueue;
+ }
+
+ transition(S_L, Persistent_GETX, I_L) {
+ e_sendAckWithCollectedTokens;
+ l_popPersistentQueue;
+ }
+
+ transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
+ l_popPersistentQueue;
+ }
+
+ transition(I_L, {Persistent_GETX, Persistent_GETS}) {
+ l_popPersistentQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+machine(MachineType:Directory, "Token protocol")
+ : DirectoryMemory * directory;
+ int l2_select_num_bits;
+ Cycles directory_latency := 5;
+ bool distributed_persistent := "True";
+ Cycles fixed_timeout_latency := 100;
+ Cycles reissue_wakeup_latency := 10;
+ Cycles to_memory_controller_latency := 1;
+
+ // Message Queues from dir to other controllers / network
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
+ vnet_type="response";
+
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ vnet_type="response";
+
+ MessageBuffer * persistentFromDir, network="To", virtual_network="3",
+ vnet_type="persistent";
+
+ MessageBuffer * requestFromDir, network="To", virtual_network="1",
+ vnet_type="request";
+
+ // Message Queues to dir from other controllers / network
+ MessageBuffer * responseToDir, network="From", virtual_network="4",
+ vnet_type="response";
+
+ MessageBuffer * persistentToDir, network="From", virtual_network="3",
+ vnet_type="persistent";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ vnet_type="request";
+
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ vnet_type="request";
+
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_O") {
+ // Base states
+ O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
+ NO, AccessPermission:Maybe_Stale, desc="Not Owner";
+ L, AccessPermission:Busy, desc="Locked";
+
+ // Memory wait states - can block all messages including persistent requests
+ O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
+ L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
+ L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
+ DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
+ DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
+ NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
+ O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
+ O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";
+
+ // DMA request transient states - must respond to persistent requests
+ O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
+ NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
+ NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";
+
+ // DMA request in progress - competing with a CPU persistent request
+ DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
+ DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
+
+ }
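+
+ // Naming convention for the transient states above: a trailing _W means
+ // the directory is waiting on the memory controller and may block all
+ // incoming messages, including persistent requests; O_DW/NO_DW/NO_DR are
+ // DMA transients that must still respond to persistent requests; DW_L and
+ // DR_L mark a DMA request stalled behind a competing CPU persistent
+ // request.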
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ Lockdown, desc="A lockdown request arrives";
+ Unlockdown, desc="An un-lockdown request arrives";
+ Own_Lock_or_Unlock, desc="own lock or unlock";
+ Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
+ Data_Owner, desc="Data arrives with the owner token";
+ Data_All_Tokens, desc="Data and all tokens";
+ Ack_Owner, desc="Owner token arrived without data because it was clean";
+ Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
+ Tokens, desc="Tokens arrive";
+ Ack_All_Tokens, desc="All tokens arrive";
+ Request_Timeout, desc="A DMA request has timed out";
+
+ // Memory Controller
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+
+ // DMA requests
+ DMA_READ, desc="A DMA Read memory request";
+ DMA_WRITE, desc="A DMA Write memory request";
+ DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
+
+ // The following state is provided to allow for bandwidth
+ // efficient directory-like operation. However all of this state
+ // is 'soft state' that does not need to be correct (as long as
+ // you're eventually willing to resort to broadcast.)
+
+ Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
+ Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
+ }
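+
+ // Because Owner and Sharers are only hints, a stale entry costs at most
+ // extra broadcast traffic: if the hinted owner no longer holds the owner
+ // token, the requestor eventually falls back to a persistent request,
+ // which guarantees forward progress regardless of the filter contents.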
+
+ structure(PersistentTable, external="yes") {
+ void persistentRequestLock(Addr, MachineID, AccessType);
+ void persistentRequestUnlock(Addr, MachineID);
+ bool okToIssueStarving(Addr, MachineID);
+ MachineID findSmallest(Addr);
+ AccessType typeOfSmallest(Addr);
+ void markEntries(Addr);
+ bool isLocked(Addr);
+ int countStarvingForAddress(Addr);
+ int countReadStarvingForAddress(Addr);
+ }
+
+ // TBE entries for DMA requests
+ structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ Addr PhysicalAddress, desc="physical address";
+ State TBEState, desc="Transient State";
+ DataBlock DataBlk, desc="Current view of the associated address range";
+ int Len, desc="...";
+ MachineID DmaRequestor, desc="DMA requestor";
+ bool WentPersistent, desc="Did the DMA request require a persistent request";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ // ** OBJECTS **
+
+ PersistentTable persistentTable;
+ TimerTable reissueTimerTable;
+
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ bool starving, default="false";
+ int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+
+ Tick clockEdge();
+ Tick clockEdge(Cycles c);
+ Tick cyclesToTicks(Cycles c);
+ void set_tbe(TBE b);
+ void unset_tbe();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+
+ if (is_valid(dir_entry)) {
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else {
+ return getDirectoryEntry(addr).DirectoryState;
+ }
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ getDirectoryEntry(addr).DirectoryState := state;
+
+ if (state == State:L || state == State:DW_L || state == State:DR_L) {
+ assert(getDirectoryEntry(addr).Tokens == 0);
+ }
+
+ // We have one or zero owners
+ assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));
+
+ // Make sure the token count is in range
+ assert(getDirectoryEntry(addr).Tokens >= 0);
+ assert(getDirectoryEntry(addr).Tokens <= max_tokens());
+
+ if (state == State:O || state == State:O_W || state == State:O_DW) {
+ assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
+ // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
+ }
+ }
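+
+ // Token-counting sketch behind the assertions above: with T = max_tokens(),
+ // a writer must collect all T tokens and a reader needs at least one, so
+ // every line always satisfies 0 <= Tokens <= T. Whenever the directory
+ // believes it is the owner (O, O_W, O_DW) it must hold the owner token,
+ // hence the Tokens >= 1 assertion in those states.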
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ return Directory_State_to_permission(tbe.TBEState);
+ }
+
+ if (directory.isPresent(addr)) {
+ DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+
+ bool okToIssueStarving(Addr addr, MachineID machineID) {
+ return persistentTable.okToIssueStarving(addr, machineID);
+ }
+
+ void markPersistentEntries(Addr addr) {
+ persistentTable.markEntries(addr);
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ // ** OUT_PORTS **
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
+ out_port(requestNetwork_out, RequestMsg, requestFromDir);
+ out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
+
+ // ** IN_PORTS **
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Reissue Timer
+ in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
+ }
+ }
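+
+ // Timeout flow: st_scheduleTimeout arms this timer when a DMA request is
+ // broadcast; if the tokens or data have not arrived within
+ // fixed_timeout_latency, Request_Timeout fires here and the directory
+ // escalates to a persistent request (see the px_/ps_ actions below).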
+
+ in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ if (responseNetwork_in.isReady(clockEdge())) {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
+ if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
+ (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
+ trigger(Event:Data_All_Tokens, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
+ trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack_All_Tokens, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ } else {
+ if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
+ trigger(Event:Data_Owner, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
+ (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
+ trigger(Event:Tokens, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
+ trigger(Event:Ack_Owner, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else {
+ DPRINTF(RubySlicc, "%s\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+ }
+
+ in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
+ peek(persistentNetwork_in, PersistentMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+
+ if (distributed_persistent) {
+ // Apply the lockdown or unlockdown message to the table
+ if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
+ } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+ persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
+ } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+ persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
+ } else {
+ error("Invalid message");
+ }
+
+ // React to the message based on the current state of the table
+ if (persistentTable.isLocked(in_msg.addr)) {
+ if (persistentTable.findSmallest(in_msg.addr) == machineID) {
+ if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
+ trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else {
+ trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
+ TBEs[in_msg.addr]);
+ }
+ } else {
+ // locked
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
+ }
+ } else {
+ // unlocked
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
+ }
+ }
+ else {
+ if (persistentTable.findSmallest(in_msg.addr) == machineID) {
+ if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
+ trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
+ TBEs[in_msg.addr]);
+ } else {
+ trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
+ TBEs[in_msg.addr]);
+ }
+ } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
+ // locked
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
+ // locked
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
+ // unlocked
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+ }
+
+ in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ if (requestNetwork_in.isReady(clockEdge())) {
+ peek(requestNetwork_in, RequestMsg) {
+ assert(in_msg.Destination.isElement(machineID));
+ if (in_msg.Type == CoherenceRequestType:GETS) {
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
+ } else if (in_msg.Type == CoherenceRequestType:GETX) {
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ if (in_msg.Type == DMARequestType:READ) {
+ trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == DMARequestType:WRITE) {
+ if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
+ trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
+ TBEs[in_msg.LineAddress]);
+ } else {
+ trigger(Event:DMA_WRITE, in_msg.LineAddress,
+ TBEs[in_msg.LineAddress]);
+ }
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
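+
+ // Note the DMA write fast path: when the directory already holds all
+ // tokens for the line, DMA_WRITE_All_Tokens skips the GETX broadcast and
+ // the partial write is queued to memory directly (see transition(O,
+ // DMA_WRITE_All_Tokens, O_DW_W) below).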
+
+ // Actions
+
+ action(a_sendTokens, "a", desc="Send tokens to requestor") {
+ // Only send a message if we have tokens to send
+ if (getDirectoryEntry(address).Tokens > 0) {
+ peek(requestNetwork_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ getDirectoryEntry(address).Tokens := 0;
+ }
+ }
+
+ action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
+ if (okToIssueStarving(address, machineID) && (starving == false)) {
+ enqueue(persistentNetwork_out, PersistentMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+ // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ out_msg.Prefetch := PrefetchBit:No;
+ out_msg.AccessMode := RubyAccessMode:Supervisor;
+ }
+ markPersistentEntries(address);
+ starving := true;
+
+ tbe.WentPersistent := true;
+
+ // Do not schedule a wakeup; a persistent request always completes
+ } else {
+
+ // We'd like to issue a persistent request, but are not allowed
+ // to issue a P.R. right now. Thus, we do not increment the
+ // IssueCount.
+
+ // Set a wakeup timer
+ reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
+ }
+ }
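+
+ // Lifecycle of a directory persistent request: the broadcast above locks
+ // the line at every node through their persistent tables; once the DMA
+ // request completes, s_deallocateTBE sends DEACTIVATE_PERSISTENT and
+ // clears the local starving flag. ps_tryIssuingPersistentGETSRequest
+ // below is the read-only analogue.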
+
+ action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ //
+ // Assert that we only send a message if we do not already have all the tokens
+ //
+ assert(getDirectoryEntry(address).Tokens != max_tokens());
+ enqueue(requestNetwork_out, RequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+
+ //
+ // Since only one chip, assuming all L1 caches are local
+ //
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.RetryNum := 0;
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.Prefetch := PrefetchBit:No;
+ out_msg.AccessMode := RubyAccessMode:Supervisor;
+ }
+ }
+ }
+
+ action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
+ if (okToIssueStarving(address, machineID) && (starving == false)) {
+ enqueue(persistentNetwork_out, PersistentMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+ // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ out_msg.Prefetch := PrefetchBit:No;
+ out_msg.AccessMode := RubyAccessMode:Supervisor;
+ }
+ markPersistentEntries(address);
+ starving := true;
+
+ tbe.WentPersistent := true;
+
+ // Do not schedule a wakeup; a persistent request always completes
+ } else {
+
+ // We'd like to issue a persistent request, but are not allowed
+ // to issue a P.R. right now. Thus, we do not increment the
+ // IssueCount.
+
+ // Set a wakeup timer
+ reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
+ }
+ }
+
+ action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(requestNetwork_out, RequestMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+
+ //
+ // Since only one chip, assuming all L1 caches are local
+ //
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.RetryNum := 0;
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.Prefetch := PrefetchBit:No;
+ out_msg.AccessMode := RubyAccessMode:Supervisor;
+ }
+ }
+ }
+
+ action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
+ // Only send a message if we have tokens to send
+ if (getDirectoryEntry(address).Tokens > 0) {
+ enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := getDirectoryEntry(address).Tokens;
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ getDirectoryEntry(address).Tokens := 0;
+ }
+ }
+
+ action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ assert(getDirectoryEntry(address).Tokens > 0);
+ out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ getDirectoryEntry(address).Tokens := 0;
+ }
+
+ action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(getDirectoryEntry(address).Tokens > 0);
+ out_msg.Tokens := getDirectoryEntry(address).Tokens;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ getDirectoryEntry(address).Tokens := 0;
+ }
+
+ action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ assert(getDirectoryEntry(address).Tokens > 0);
+ out_msg.Tokens := getDirectoryEntry(address).Tokens;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := false;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ getDirectoryEntry(address).Tokens := 0;
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestNetwork_in, RequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+ action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
+ queueMemoryRead(persistentTable.findSmallest(address), address,
+ to_memory_controller_latency);
+ }
+
+ action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+ action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+ action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
+ queueMemoryWritePartial(tbe.DmaRequestor, address,
+ to_memory_controller_latency, tbe.DataBlk,
+ tbe.Len);
+ }
+
+ action(lr_queueMemoryDmaReadWriteback, "lr",
+ desc="Write DMA data from read to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+ action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.PhysicalAddress := in_msg.PhysicalAddress;
+ tbe.Len := in_msg.Len;
+ tbe.DmaRequestor := in_msg.Requestor;
+ tbe.WentPersistent := false;
+ }
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+
+ if (tbe.WentPersistent) {
+ assert(starving);
+
+ enqueue(persistentNetwork_out, PersistentMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+
+ //
+ // Currently the configuration system limits the system to only one
+ // chip. Therefore, if we assume one shared L2 cache, then only one
+ // pertinent L2 cache exists.
+ //
+ //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
+
+ out_msg.Destination.add(mapAddressToRange(address,
+ MachineType:L2Cache, l2_select_low_bit,
+ l2_select_num_bits, intToID(0)));
+
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Persistent_Control;
+ }
+ starving := false;
+ }
+
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
+ peek(responseNetwork_in, ResponseMsg) {
+ DataBlock DataBlk := tbe.DataBlk;
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.DataBlk.copyPartial(DataBlk, getOffset(tbe.PhysicalAddress),
+ tbe.Len);
+ }
+ }
+
+ action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Tokens >= 1);
+ getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
+ }
+ }
+
+ action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
+ assert(getDirectoryEntry(address).Tokens == max_tokens());
+ }
+
+ action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
+ requestNetwork_in.dequeue(clockEdge());
+ }
+
+ action(z_recycleRequest, "z", desc="Recycle the request queue") {
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
+ responseNetwork_in.dequeue(clockEdge());
+ }
+
+ action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
+ persistentNetwork_in.dequeue(clockEdge());
+ }
+
+ action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+ action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
+ }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.MessageSize := in_msg.MessageSize;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Dirty := in_msg.Dirty;
+ }
+ }
+ }
+
+ action(rs_resetScheduleTimeout, "rs", desc="Reschedule the request timeout") {
+ //
+ // currently only support a fixed timeout latency
+ //
+ if (reissueTimerTable.isSet(address)) {
+ reissueTimerTable.unset(address);
+ reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
+ }
+ }
+
+ action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
+ //
+ // currently only support a fixed timeout latency
+ //
+ reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
+ }
+
+ action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
+ if (reissueTimerTable.isSet(address)) {
+ reissueTimerTable.unset(address);
+ }
+ }
+
+ action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
+ peek(responseNetwork_in, ResponseMsg) {
+ assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+
+ // Bounce the message, but "re-associate" the data and the owner
+ // token. In essence we're converting an ACK_OWNER message to a
+ // DATA_OWNER message, keeping the number of tokens the same.
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_OWNER;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(persistentTable.findSmallest(address));
+ out_msg.Tokens := in_msg.Tokens;
+ out_msg.Dirty := in_msg.Dirty;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:ACK;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ //
+ // we send the entire data block and rely on the dma controller to
+ // split it up if need be
+ //
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
+ peek(responseNetwork_in, ResponseMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ //
+ // we send the entire data block and rely on the dma controller to
+ // split it up if need be
+ //
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ // TRANSITIONS
+
+ //
+ // Trans. from base state O
+ // the directory has valid data
+ //
+ transition(O, GETX, NO_W) {
+ qf_queueMemoryFetchRequest;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, DMA_WRITE, O_DW) {
+ vd_allocateDmaRequestInTBE;
+ bw_broadcastWrite;
+ st_scheduleTimeout;
+ p_popDmaRequestQueue;
+ }
+
+ transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
+ vd_allocateDmaRequestInTBE;
+ ld_queueMemoryDmaWriteFromTbe;
+ p_popDmaRequestQueue;
+ }
+
+ transition(O, GETS, NO_W) {
+ qf_queueMemoryFetchRequest;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(O, DMA_READ, O_DR_W) {
+ vd_allocateDmaRequestInTBE;
+ fd_memoryDma;
+ st_scheduleTimeout;
+ p_popDmaRequestQueue;
+ }
+
+ transition(O, Lockdown, L_O_W) {
+ qp_queueMemoryForPersistent;
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(O, {Tokens, Ack_All_Tokens}) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O, {Data_Owner, Data_All_Tokens}) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition({O, NO}, Unlockdown) {
+ l_popIncomingPersistentQueue;
+ }
+
+ //
+ // transitioning to Owner, waiting for memory before DMA ack
+ // All other events should recycle/stall
+ //
+ transition(O_DR_W, Memory_Data, O) {
+ dm_sendMemoryDataToDma;
+ ut_unsetReissueTimer;
+ s_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ //
+ // issued GETX for DMA write, waiting for all tokens
+ //
+ transition(O_DW, Request_Timeout) {
+ ut_unsetReissueTimer;
+ px_tryIssuingPersistentGETXRequest;
+ }
+
+ transition(O_DW, Tokens) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O_DW, Data_Owner) {
+ f_incrementTokens;
+ rd_recordDataInTbe;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O_DW, Ack_Owner) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O_DW, Lockdown, DW_L) {
+ de_sendTbeDataToStarver;
+ l_popIncomingPersistentQueue;
+ }
+
+ transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
+ f_incrementTokens;
+ rd_recordDataInTbe;
+ ld_queueMemoryDmaWriteFromTbe;
+ ut_unsetReissueTimer;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O_DW, Ack_All_Tokens, O_DW_W) {
+ f_incrementTokens;
+ ld_queueMemoryDmaWriteFromTbe;
+ ut_unsetReissueTimer;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
+ f_incrementTokens;
+ ld_queueMemoryDmaWriteFromTbe;
+ ut_unsetReissueTimer;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(O_DW_W, Memory_Ack, O) {
+ da_sendDmaAck;
+ s_deallocateTBE;
+ l_popMemQueue;
+ }
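+
+ // End-to-end DMA write from O with no lockdown: DMA_WRITE allocates a
+ // TBE, broadcasts GETX, and arms the timeout (-> O_DW); incoming token and
+ // data responses accumulate until an *_All_Tokens event queues the partial
+ // write to memory (-> O_DW_W); Memory_Ack then acks the DMA controller,
+ // frees the TBE, and returns the line to O.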
+
+ //
+ // Trans. from NO
+ // The directory does not have valid data, but may have some tokens
+ //
+ transition(NO, GETX) {
+ a_sendTokens;
+ j_popIncomingRequestQueue;
+ }
+
+ transition(NO, DMA_WRITE, NO_DW) {
+ vd_allocateDmaRequestInTBE;
+ bw_broadcastWrite;
+ st_scheduleTimeout;
+ p_popDmaRequestQueue;
+ }
+
+ transition(NO, GETS) {
+ j_popIncomingRequestQueue;
+ }
+
+ transition(NO, DMA_READ, NO_DR) {
+ vd_allocateDmaRequestInTBE;
+ br_broadcastRead;
+ st_scheduleTimeout;
+ p_popDmaRequestQueue;
+ }
+
+ transition(NO, Lockdown, L) {
+ aa_sendTokensToStarver;
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
+ f_incrementTokens;
+ lq_queueMemoryWbRequest;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(NO, Tokens) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(NO_W, Memory_Data, NO) {
+ d_sendMemoryDataWithAllTokens;
+ l_popMemQueue;
+ }
+
+ // Trans. from NO_DW
+ transition(NO_DW, Request_Timeout) {
+ ut_unsetReissueTimer;
+ px_tryIssuingPersistentGETXRequest;
+ }
+
+ transition(NO_DW, Lockdown, DW_L) {
+ aa_sendTokensToStarver;
+ l_popIncomingPersistentQueue;
+ }
+
+ // Note: NO_DW, Data_All_Tokens transition is combined with O_DW
+ // Note: NO_DW should not see the Ack_All_Tokens event because the
+ // directory does not have valid data
+
+ transition(NO_DW, Data_Owner, O_DW) {
+ f_incrementTokens;
+ rd_recordDataInTbe;
+ k_popIncomingResponseQueue;
+ }
+
+ transition({NO_DW, NO_DR}, Tokens) {
+ f_incrementTokens;
+ k_popIncomingResponseQueue;
+ }
+
+ // Trans. from NO_DR
+ transition(NO_DR, Request_Timeout) {
+ ut_unsetReissueTimer;
+ ps_tryIssuingPersistentGETSRequest;
+ }
+
+ transition(NO_DR, Lockdown, DR_L) {
+ aa_sendTokensToStarver;
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
+ f_incrementTokens;
+ dd_sendDmaData;
+ lr_queueMemoryDmaReadWriteback;
+ ut_unsetReissueTimer;
+ s_deallocateTBE;
+ k_popIncomingResponseQueue;
+ }
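+
+ // DMA read from NO: DMA_READ broadcasts a GETS (-> NO_DR); when the owner
+ // responds, the data is forwarded to the DMA controller and also written
+ // back to memory (-> O_W), and the directory becomes owner once memory
+ // acknowledges (O_W -> O below).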
+
+ // Trans. from L
+ transition({L, DW_L, DR_L}, {GETX, GETS}) {
+ j_popIncomingRequestQueue;
+ }
+
+ transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
+ l_popIncomingPersistentQueue;
+ }
+
+ //
+ // Received data for lockdown blocks
+ // For blocks with outstanding dma requests to them
+ // ...we could change this to write the data to memory and send it cleanly
+ // ...we could also proactively complete our DMA requests
+ // However, to keep my mind from spinning out-of-control, we won't for now :)
+ //
+ transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
+ r_bounceResponse;
+ k_popIncomingResponseQueue;
+ }
+
+ transition({DW_L, DR_L, L}, Tokens) {
+ r_bounceResponse;
+ k_popIncomingResponseQueue;
+ }
+
+ transition({DW_L, DR_L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
+ bd_bounceDatalessOwnerToken;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(L, {Ack_Owner_All_Tokens, Ack_Owner}, L_O_W) {
+ f_incrementTokens;
+ qp_queueMemoryForPersistent;
+ k_popIncomingResponseQueue;
+ }
+
+ transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(L, Own_Lock_or_Unlock_Tokens, O) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition({L_NO_W, L_O_W}, Memory_Data, L) {
+ dd_sendMemDataToStarver;
+ l_popMemQueue;
+ }
+
+ transition(L_O_W, Memory_Ack) {
+ qp_queueMemoryForPersistent;
+ l_popMemQueue;
+ }
+
+ transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(DR_L_W, Memory_Data, DR_L) {
+ dd_sendMemDataToStarver;
+ l_popMemQueue;
+ }
+
+ transition(DW_L_W, Memory_Ack, L) {
+ aat_assertAllTokens;
+ da_sendDmaAck;
+ s_deallocateTBE;
+ dd_sendMemDataToStarver;
+ l_popMemQueue;
+ }
+
+ transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
+ ut_unsetReissueTimer;
+ px_tryIssuingPersistentGETXRequest;
+ }
+
+ transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(DR_L, Request_Timeout) {
+ ut_unsetReissueTimer;
+ ps_tryIssuingPersistentGETSRequest;
+ }
+
+ //
+ // The O_W + Memory_Data -> O transition looks odd, but it can happen if a
+ // persistent request is issued and resolves before memory returns with data
+ //
+ transition(O_W, {Memory_Ack, Memory_Data}, O) {
+ l_popMemQueue;
+ }
+
+ transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
+ l_popIncomingPersistentQueue;
+ }
+
+ // Blocked states
+ transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
+ z_recycleRequest;
+ }
+
+ transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
+ y_recycleDmaRequestQueue;
+ }
+
+ transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
+ kz_recycleResponse;
+ }
+
+ //
+ // If we receive a request timeout while waiting for memory, it is likely that
+ // the request will be satisfied and issuing a persistent request will do us
+ // no good. Just wait.
+ //
+ transition({O_DW_W, O_DR_W}, Request_Timeout) {
+ rs_resetScheduleTimeout;
+ }
+
+ transition(NO_W, Lockdown, L_NO_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(O_W, Lockdown, L_O_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(O_DR_W, Lockdown, DR_L_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition(O_DW_W, Lockdown, DW_L_W) {
+ l_popIncomingPersistentQueue;
+ }
+
+ transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
+ l_popIncomingPersistentQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+machine(MachineType:DMA, "DMA Controller")
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
+
+ // Message Queues
+ MessageBuffer * responseFromDir, network="From", virtual_network="5",
+ vnet_type="response";
+ MessageBuffer * reqToDirectory, network="To", virtual_network="0",
+ vnet_type="request";
+
+ MessageBuffer * mandatoryQueue;
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, AccessPermission:Invalid, desc="Ready to accept a new request";
+ BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a request";
+ BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ Ack, desc="DMA write to memory completed";
+ }
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+
+ TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else {
+ return State:READY;
+ }
+ }
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("DMA does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("DMA does not support functional write.");
+ }
+
+ out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
+ trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
+ peek(dmaResponseQueue_in, DMAResponseMsg) {
+ if (in_msg.Type == DMAResponseType:ACK) {
+ trigger(Event:Ack, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == DMAResponseType:DATA) {
+ trigger(Event:Data, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.LineAddress := in_msg.LineAddress;
+ out_msg.Type := DMARequestType:READ;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg, request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.LineAddress := in_msg.LineAddress;
+ out_msg.Type := DMARequestType:WRITE;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
+ dma_sequencer.ackCallback(address);
+ }
+
+ action(d_dataCallback, "d", desc="Write data to dma sequencer") {
+ dma_sequencer.dataCallback(tbe.DataBlk, address);
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ assert(is_valid(tbe));
+ peek(dmaResponseQueue_in, DMAResponseMsg) {
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+ action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue(clockEdge());
+ }
+
+ action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
+ stall_and_wait(dmaRequestQueue_in, address);
+ }
+
+ action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
+
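+ // The DMA controller is a simple two-phase machine: READY accepts one
+ // request per line, BUSY_RD/BUSY_WR wait for the directory's DATA or ACK,
+ // and a request to a line that is already busy is parked via
+ // stall_and_wait and woken once the outstanding response retires the TBE.
+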
+ transition(READY, ReadRequest, BUSY_RD) {
+ v_allocateTBE;
+ s_sendReadRequest;
+ p_popRequestQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ v_allocateTBE;
+ s_sendWriteRequest;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Data, READY) {
+ t_updateTBEData;
+ d_dataCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition(BUSY_WR, Ack, READY) {
+ a_ackCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
+ zz_stallAndWaitRequestQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETS, desc="Get Shared";
+}
+
+// PersistentType
+enumeration(PersistentRequestType, desc="...") {
+ GETX_PERSISTENT, desc="...";
+ GETS_PERSISTENT, desc="...";
+ DEACTIVATE_PERSISTENT,desc="...";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+ DATA_OWNER, desc="Data";
+ ACK_OWNER, desc="data-less owner token";
+ DATA_SHARED, desc="Data";
+ ACK, desc="ACKnowledgment";
+ WB_TOKENS, desc="L1 to L2 writeback";
+ WB_SHARED_DATA, desc="L1 to L2 writeback with data";
+ WB_OWNED, desc="L1 to L2 writeback with data";
+ INV, desc="L1 informing L2 of loss of all tokens";
+}
+
+// PersistentMsg
+structure(PersistentMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ PersistentRequestType Type, desc="Type of starvation request";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Destination set";
+ MessageSizeType MessageSize, desc="size category of the message";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+
+ bool functionalRead(Packet *pkt) {
+ // No data in persistent messages
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No data in persistent messages
+ return false;
+ }
+}
+
+// RequestMsg
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Multicast destination mask";
+ bool isLocal, desc="Is this request from a local L1";
+ int RetryNum, desc="retry sequence number";
+ MessageSizeType MessageSize, desc="size category of the message";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+
+ bool functionalRead(Packet *pkt) {
+ // No data in request messages
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No data in request messages
+ return false;
+ }
+}
+
+// ResponseMsg
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="Node who sent the data";
+ NetDest Destination, desc="Node to whom the data is sent";
+ int Tokens, desc="Number of tokens being transferred for this line";
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ // No check being carried out on the message type. Would be added later.
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // No check required since all messages are written.
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
+ READ, desc="Memory Read";
+ WRITE, desc="Memory Write";
+ NULL, desc="Invalid";
+}
+
+enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
+ DATA, desc="DATA read";
+ ACK, desc="ACK write";
+ NULL, desc="Invalid";
+}
+
+structure(DMARequestMsg, desc="...", interface="Message") {
+ DMARequestType Type, desc="Request type (read/write)";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ Addr LineAddress, desc="Line address for this request";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ int Len, desc="The length of the request";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(LineAddress, DataBlk, pkt);
+ }
+}
+
+structure(DMAResponseMsg, desc="...", interface="Message") {
+ DMAResponseType Type, desc="Response type (DATA/ACK)";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ Addr LineAddress, desc="Line address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(LineAddress, DataBlk, pkt);
+ }
+}
--- /dev/null
+protocol "MOESI_CMP_token";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_CMP_token-msg.sm";
+include "MOESI_CMP_token-L1cache.sm";
+include "MOESI_CMP_token-L2cache.sm";
+include "MOESI_CMP_token-dir.sm";
+include "MOESI_CMP_token-dma.sm";
--- /dev/null
+/*
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * AMD's contributions to the MOESI hammer protocol do not constitute an
+ * endorsement of its similarity to any AMD products.
+ *
+ * Authors: Milo Martin
+ * Brad Beckmann
+ */
+
+machine(MachineType:L1Cache, "AMD Hammer-like protocol")
+ : Sequencer * sequencer;
+ CacheMemory * L1Icache;
+ CacheMemory * L1Dcache;
+ CacheMemory * L2cache;
+ Cycles cache_response_latency := 10;
+ Cycles issue_latency := 2;
+ Cycles l2_cache_hit_latency := 10;
+ bool no_mig_atomic := "True";
+ bool send_evictions;
+
+ // NETWORK BUFFERS
+ MessageBuffer * requestFromCache, network="To", virtual_network="2",
+ vnet_type="request";
+ MessageBuffer * responseFromCache, network="To", virtual_network="4",
+ vnet_type="response";
+ MessageBuffer * unblockFromCache, network="To", virtual_network="5",
+ vnet_type="unblock";
+
+ MessageBuffer * forwardToCache, network="From", virtual_network="3",
+ vnet_type="forward";
+ MessageBuffer * responseToCache, network="From", virtual_network="4",
+ vnet_type="response";
+
+ MessageBuffer * mandatoryQueue;
+
+ MessageBuffer * triggerQueue;
+{
+ // STATES
+ state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
+ // Base states
+ I, AccessPermission:Invalid, desc="Idle";
+ S, AccessPermission:Read_Only, desc="Shared";
+ O, AccessPermission:Read_Only, desc="Owned";
+ M, AccessPermission:Read_Only, desc="Modified (dirty)";
+ MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
+
+ // Base states, locked and ready to service the mandatory queue
+ IR, AccessPermission:Invalid, desc="Idle";
+ SR, AccessPermission:Read_Only, desc="Shared";
+ OR, AccessPermission:Read_Only, desc="Owned";
+ MR, AccessPermission:Read_Only, desc="Modified (dirty)";
+ MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
+
+ // Transient States
+ IM, AccessPermission:Busy, "IM", desc="Issued GetX";
+ SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
+ OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
+ ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
+ M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
+ MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
+ IS, AccessPermission:Busy, "IS", desc="Issued GetS";
+ SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
+ OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
+ MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
+ II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
+ ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
+ OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
+ MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
+    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
+
+ //Transition States Related to Flushing
+ MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
+ MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
+ IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
+ ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
+ SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
+ OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
+    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GETF due to a Flush, received exclusive data";
+ }
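+
+  // Naming convention: the plain states are the stable MOESI states (MM
+  // is M that has also been written locally), the *R states are their
+  // "ready" twins entered right after an L2-to-L1 transfer completes,
+  // the *T states mark a transfer in flight, and the *_F states belong
+  // to the flush sequence.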
+
+ // EVENTS
+ enumeration(Event, desc="Cache events") {
+ Load, desc="Load request from the processor";
+ Ifetch, desc="I-fetch request from the processor";
+ Store, desc="Store request from the processor";
+ L2_Replacement, desc="L2 Replacement";
+ L1_to_L2, desc="L1 to L2 transfer";
+ Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
+ Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
+ Complete_L2_to_L1, desc="L2 to L1 transfer completed";
+
+ // Requests
+ Other_GETX, desc="A GetX from another processor";
+ Other_GETS, desc="A GetS from another processor";
+ Merged_GETS, desc="A Merged GetS from another processor";
+    Other_GETS_No_Mig, desc="A GetS from another processor for a block that must not migrate";
+    NC_DMA_GETS, desc="A GetS when this is the only cache, so the requestor must be a DMA device";
+ Invalidate, desc="Invalidate block";
+
+ // Responses
+ Ack, desc="Received an ack message";
+ Shared_Ack, desc="Received an ack message, responder has a shared copy";
+ Data, desc="Received a data message";
+ Shared_Data, desc="Received a data message, responder has a shared copy";
+ Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
+
+ Writeback_Ack, desc="Writeback O.K. from directory";
+ Writeback_Nack, desc="Writeback not O.K. from directory";
+
+ // Triggers
+ All_acks, desc="Received all required data and message acks";
+ All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
+
+ // For Flush
+ Flush_line, desc="flush the cache line from all caches";
+ Block_Ack, desc="the directory is blocked and ready for the flush";
+ }
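+
+  // This is a broadcast (Hammer-style) protocol: every other cache
+  // answers a request with either data or an ack, the requester counts
+  // the responses down in its TBE, and All_acks / All_acks_no_sharers
+  // fire from the trigger queue once the count reaches zero.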
+
+ // STRUCTURE DEFINITIONS
+ // CacheEntry
+ structure(Entry, desc="...", interface="AbstractCacheEntry") {
+ State CacheState, desc="cache state";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ DataBlock DataBlk, desc="data for the block";
+ bool FromL2, default="false", desc="block just moved from L2";
+    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
+ }
+
+ // TBE fields
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
+ bool Sharers, desc="On a GetS, did we find any other sharers in the system";
+ bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
+ MachineID LastResponder, desc="last machine to send a response for this request";
+ MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
+
+ Cycles InitialRequestTime, default="Cycles(0)",
+           desc="time the initial request was sent from the L1Cache";
+ Cycles ForwardRequestTime, default="Cycles(0)",
+ desc="time the dir forwarded the request";
+ Cycles FirstResponseTime, default="Cycles(0)",
+ desc="the time the first response was received";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ Entry getCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
+ if(is_valid(L2cache_entry)) {
+ return L2cache_entry;
+ }
+
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
+ if(is_valid(L1Dcache_entry)) {
+ return L1Dcache_entry;
+ }
+
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
+ return L1Icache_entry;
+ }
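+
+  // The two L1s and the L2 are kept exclusive (see the asserts in
+  // setState), so at most one of the three lookups above can succeed.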
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ testAndRead(addr, cache_entry.DataBlk, pkt);
+ } else {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ error("Missing data block");
+ }
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, cache_entry.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ TBE tbe := TBEs[addr];
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ return num_functional_writes;
+ }
+
+ Entry getL2CacheEntry(Addr address), return_by_pointer="yes" {
+ Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
+ return L2cache_entry;
+ }
+
+ Entry getL1DCacheEntry(Addr address), return_by_pointer="yes" {
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
+ return L1Dcache_entry;
+ }
+
+ Entry getL1ICacheEntry(Addr address), return_by_pointer="yes" {
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
+ return L1Icache_entry;
+ }
+
+ State getState(TBE tbe, Entry cache_entry, Addr addr) {
+ if(is_valid(tbe)) {
+ return tbe.TBEState;
+ } else if (is_valid(cache_entry)) {
+ return cache_entry.CacheState;
+ }
+ return State:I;
+ }
+
+ void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
+ assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
+ assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
+
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+
+ if (is_valid(cache_entry)) {
+ cache_entry.CacheState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ return L1Cache_State_to_permission(tbe.TBEState);
+ }
+
+ Entry cache_entry := getCacheEntry(addr);
+ if(is_valid(cache_entry)) {
+ return L1Cache_State_to_permission(cache_entry.CacheState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Entry cache_entry, Addr addr, State state) {
+ if (is_valid(cache_entry)) {
+ cache_entry.changePermission(L1Cache_State_to_permission(state));
+ }
+ }
+
+ Event mandatory_request_type_to_event(RubyRequestType type) {
+ if (type == RubyRequestType:LD) {
+ return Event:Load;
+ } else if (type == RubyRequestType:IFETCH) {
+ return Event:Ifetch;
+ } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
+ return Event:Store;
+ } else if ((type == RubyRequestType:FLUSH)) {
+ return Event:Flush_line;
+ } else {
+ error("Invalid RubyRequestType");
+ }
+ }
+
+ MachineType testAndClearLocalHit(Entry cache_entry) {
+ if (is_valid(cache_entry) && cache_entry.FromL2) {
+ cache_entry.FromL2 := false;
+ return MachineType:L2Cache;
+ }
+ return MachineType:L1Cache;
+ }
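+
+  // FromL2 is consumed on the first hit after an L2-to-L1 transfer so
+  // that the hit is attributed to the L2 in the sequencer callbacks.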
+
+ bool IsAtomicAccessed(Entry cache_entry) {
+ assert(is_valid(cache_entry));
+ return cache_entry.AtomicAccessed;
+ }
+
+ // ** OUT_PORTS **
+ out_port(requestNetwork_out, RequestMsg, requestFromCache);
+ out_port(responseNetwork_out, ResponseMsg, responseFromCache);
+ out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ // ** IN_PORTS **
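+  // In ports are serviced in rank order, highest first: triggers (3),
+  // then responses (2), then forwarded requests (1), with the mandatory
+  // queue (0) last, so in-flight work drains before new CPU requests.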
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if (in_msg.Type == TriggerType:L2_to_L1) {
+ trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
+ trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Nothing from the unblock network
+
+ // Response Network
+ in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
+ if (responseToCache_in.isReady(clockEdge())) {
+ peek(responseToCache_in, ResponseMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
+ trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Forward Network
+ in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
+ if (forwardToCache_in.isReady(clockEdge())) {
+ peek(forwardToCache_in, RequestMsg, block_on="addr") {
+
+ Entry cache_entry := getCacheEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+
+ if ((in_msg.Type == CoherenceRequestType:GETX) ||
+ (in_msg.Type == CoherenceRequestType:GETF)) {
+ trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
+ trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:GETS) {
+ if (machineCount(MachineType:L1Cache) > 1) {
+ if (is_valid(cache_entry)) {
+ if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
+ trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
+ } else {
+ trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
+ }
+ } else {
+ trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
+ }
+ } else if (in_msg.Type == CoherenceRequestType:INV) {
+ trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
+ trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
+ trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
+ trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // Nothing from the request network
+
+ // Mandatory Queue
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
+ peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
+
+        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
+ TBE tbe := TBEs[in_msg.LineAddress];
+
+ if (in_msg.Type == RubyRequestType:IFETCH) {
+ // ** INSTRUCTION ACCESS ***
+
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
+ } else {
+ // Check to see if it is in the OTHER L1
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The block is in the wrong L1, try to write it to the L2
+ if (L2cache.cacheAvail(in_msg.LineAddress)) {
+ trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
+ } else {
+ Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Replacement,
+ l2_victim_addr,
+ getL2CacheEntry(l2_victim_addr),
+ TBEs[l2_victim_addr]);
+ }
+ }
+
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
+          // L1 doesn't have the line, but we have space for it in the L1
+
+ Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
+ if (is_valid(L2cache_entry)) {
+ // L2 has it (maybe not with the right permissions)
+ trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
+ L2cache_entry, tbe);
+ } else {
+ // We have room, the L2 doesn't have it, so the L1 fetches the line
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Icache_entry, tbe);
+ }
+ } else {
+ // No room in the L1, so we need to make room
+ // Check if the line we want to evict is not locked
+ Addr l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, l1i_victim_addr);
+ if (L2cache.cacheAvail(l1i_victim_addr)) {
+ // The L2 has room, so we move the line from the L1 to the L2
+ trigger(Event:L1_to_L2,
+ l1i_victim_addr,
+ getL1ICacheEntry(l1i_victim_addr),
+ TBEs[l1i_victim_addr]);
+ } else {
+ Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
+ // The L2 does not have room, so we replace a line from the L2
+ trigger(Event:L2_Replacement,
+ l2_victim_addr,
+ getL2CacheEntry(l2_victim_addr),
+ TBEs[l2_victim_addr]);
+ }
+ }
+ }
+ } else {
+ // *** DATA ACCESS ***
+
+ Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Dcache_entry)) {
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
+ } else {
+
+ // Check to see if it is in the OTHER L1
+ Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
+ if (is_valid(L1Icache_entry)) {
+ // The block is in the wrong L1, try to write it to the L2
+ if (L2cache.cacheAvail(in_msg.LineAddress)) {
+ trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
+ } else {
+ Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
+ trigger(Event:L2_Replacement,
+ l2_victim_addr,
+ getL2CacheEntry(l2_victim_addr),
+ TBEs[l2_victim_addr]);
+ }
+ }
+
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
+          // L1 doesn't have the line, but we have space for it in the L1
+ Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
+ if (is_valid(L2cache_entry)) {
+ // L2 has it (maybe not with the right permissions)
+ trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
+ L2cache_entry, tbe);
+ } else {
+ // We have room, the L2 doesn't have it, so the L1 fetches the line
+ trigger(mandatory_request_type_to_event(in_msg.Type),
+ in_msg.LineAddress, L1Dcache_entry, tbe);
+ }
+ } else {
+ // No room in the L1, so we need to make room
+ // Check if the line we want to evict is not locked
+ Addr l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
+ check_on_cache_probe(mandatoryQueue_in, l1d_victim_addr);
+ if (L2cache.cacheAvail(l1d_victim_addr)) {
+ // The L2 has room, so we move the line from the L1 to the L2
+ trigger(Event:L1_to_L2,
+ l1d_victim_addr,
+ getL1DCacheEntry(l1d_victim_addr),
+ TBEs[l1d_victim_addr]);
+ } else {
+ Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
+ // The L2 does not have room, so we replace a line from the L2
+ trigger(Event:L2_Replacement,
+ l2_victim_addr,
+ getL2CacheEntry(l2_victim_addr),
+ TBEs[l2_victim_addr]);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
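+
+  // Placement policy, in order: hit in the matching L1; a block found in
+  // the other L1 is pushed down to the L2 (evicting an L2 victim first
+  // if necessary); an L2 copy is pulled up via Trigger_L2_to_L1D/I; and
+  // only once room exists in the L1 is the miss actually issued.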
+
+ // ACTIONS
+
+ action(a_issueGETS, "a", desc="Issue GETS") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+
+ // One from each other cache (n-1) plus the memory (+1)
+ tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
+ }
+ }
+
+ action(b_issueGETX, "b", desc="Issue GETX") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+
+ // One from each other cache (n-1) plus the memory (+1)
+ tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
+ }
+ }
+
+ action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
+ if (machineCount(MachineType:L1Cache) > 1) {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+ }
+ }
+
+ // One from each other cache (n-1) plus the memory (+1)
+ tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
+ }
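+
+  // Unlike the other issue actions, NumPendingMsgs is initialized
+  // outside the enqueue, so the expected-response count is set up
+  // whether or not a GETX is actually broadcast here.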
+
+ action(bf_issueGETF, "bf", desc="Issue GETF") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETF;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := curCycle();
+
+ // One from each other cache (n-1) plus the memory (+1)
+ tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
+ }
+ }
+
+ action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(d_issuePUT, "d", desc="Issue PUT") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUT;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(df_issuePUTF, "df", desc="Issue PUTF") {
+ enqueue(requestNetwork_out, RequestMsg, issue_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:PUTF;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(e_sendData, "e", desc="Send data from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, we remain the owner") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination := in_msg.MergedRequestors;
+ out_msg.DataBlk := cache_entry.DataBlk;
+ out_msg.Dirty := cache_entry.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination := in_msg.MergedRequestors;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(f_sendAck, "f", desc="Send ack from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Acks := 1;
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ assert(in_msg.DirectedProbe == false);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Acks := 1;
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ assert(in_msg.DirectedProbe == false);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(g_sendUnblock, "g", desc="Send unblock to memory") {
+ enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+ action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
+ enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCKM;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+ action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
+ enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCKS;
+ out_msg.Sender := machineID;
+ out_msg.CurOwner := tbe.CurOwner;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Unblock_Control;
+ }
+ }
+
+ action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Dcache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, false,
+ testAndClearLocalHit(cache_entry));
+ }
+
+ action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(cache_entry);
+ sequencer.readCallback(address, cache_entry.DataBlk, false,
+ testAndClearLocalHit(cache_entry));
+ }
+
+ action(hx_external_load_hit, "hx", desc="load required external msgs") {
+ assert(is_valid(cache_entry));
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ peek(responseToCache_in, ResponseMsg) {
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.readCallback(address, cache_entry.DataBlk, true,
+ machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
+ tbe.ForwardRequestTime, tbe.FirstResponseTime);
+ }
+ }
+
+ action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ peek(mandatoryQueue_in, RubyRequest) {
+ L1Dcache.setMRU(cache_entry);
+ sequencer.writeCallback(address, cache_entry.DataBlk, false,
+ testAndClearLocalHit(cache_entry));
+
+ cache_entry.Dirty := true;
+ if (in_msg.Type == RubyRequestType:ATOMIC) {
+ cache_entry.AtomicAccessed := true;
+ }
+ }
+ }
+
+ action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
+ sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
+ }
+
+ action(sx_external_store_hit, "sx", desc="store required external msgs.") {
+ assert(is_valid(cache_entry));
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ peek(responseToCache_in, ResponseMsg) {
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.writeCallback(address, cache_entry.DataBlk, true,
+ machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
+ tbe.ForwardRequestTime, tbe.FirstResponseTime);
+ }
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ cache_entry.Dirty := true;
+ }
+
+ action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
+ assert(is_valid(cache_entry));
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
+ L1Icache.setMRU(address);
+ L1Dcache.setMRU(address);
+ sequencer.writeCallback(address, cache_entry.DataBlk, true,
+ machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
+ tbe.ForwardRequestTime, tbe.FirstResponseTime);
+
+ cache_entry.Dirty := true;
+ }
+
+ action(i_allocateTBE, "i", desc="Allocate TBE") {
+ check_allocate(TBEs);
+ assert(is_valid(cache_entry));
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
+ tbe.Dirty := cache_entry.Dirty;
+ tbe.Sharers := false;
+ }
+
+ action(it_allocateTBE, "it", desc="Allocate TBE") {
+ check_allocate(TBEs);
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.Dirty := false;
+ tbe.Sharers := false;
+ }
+
+ action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
+ mandatoryQueue_in.dequeue(clockEdge());
+ }
+
+  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
+ forwardToCache_in.dequeue(clockEdge());
+ }
+
+ action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
+ assert(is_valid(cache_entry));
+ assert(is_valid(tbe));
+ cache_entry.Dirty := tbe.Dirty;
+ cache_entry.DataBlk := tbe.DataBlk;
+ }
+
+ action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
+ assert(is_valid(cache_entry));
+ assert(is_valid(tbe));
+ cache_entry.Dirty := tbe.Dirty;
+ cache_entry.DataBlk := tbe.DataBlk;
+ cache_entry.FromL2 := true;
+ }
+
+ action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(in_msg.Acks >= 0);
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
+ DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
+ if (tbe.AppliedSilentAcks == false) {
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
+ tbe.AppliedSilentAcks := true;
+ }
+ DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
+ DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
+ APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
+ APPEND_TRANSITION_COMMENT(in_msg.Sender);
+ tbe.LastResponder := in_msg.Sender;
+ if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
+ assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
+ }
+ if (in_msg.InitialRequestTime != zero_time()) {
+ tbe.InitialRequestTime := in_msg.InitialRequestTime;
+ }
+ if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
+ assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
+ }
+ if (in_msg.ForwardRequestTime != zero_time()) {
+ tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ if (tbe.FirstResponseTime == zero_time()) {
+ tbe.FirstResponseTime := curCycle();
+ }
+ }
+ }
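+
+  // Silent acks (only used with a full-bit directory) are folded into
+  // the pending count at most once per request, guarded by
+  // tbe.AppliedSilentAcks above.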
+
+  action(uo_updateCurrentOwner, "uo", desc="When moving to the SS state, update the current owner.") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.CurOwner := in_msg.Sender;
+ }
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseToCache_in.dequeue(clockEdge());
+ }
+
+  action(ll_L2toL1Transfer, "ll", desc="Schedule the L2-to-L1 transfer completion trigger") {
+ enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:L2_to_L1;
+ }
+ }
+
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ if (tbe.Sharers) {
+ out_msg.Type := TriggerType:ALL_ACKS;
+ } else {
+ out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
+ }
+ }
+ }
+ }
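+
+  // tbe.Sharers, set by r_setSharerBit on Shared_Ack/Shared_Data, picks
+  // the trigger type; All_acks_no_sharers is what allows a GETS
+  // requester to end up with an exclusive copy.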
+
+ action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
+ assert(is_valid(tbe));
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
+ }
+
+ action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
+ assert(is_valid(tbe));
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
+ }
+
+ action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
+ peek(forwardToCache_in, RequestMsg) {
+ assert(in_msg.Requestor != machineID);
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
+ peek(forwardToCache_in, RequestMsg) {
+ assert(in_msg.Requestor != machineID);
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (in_msg.DirectedProbe) {
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ } else {
+ out_msg.Acks := 2;
+ }
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
+ peek(forwardToCache_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:DATA_SHARED;
+ out_msg.Sender := machineID;
+ out_msg.Destination := in_msg.MergedRequestors;
+ DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ out_msg.Acks := machineCount(MachineType:L1Cache);
+ out_msg.SilentAcks := in_msg.SilentAcks;
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
+ }
+ }
+ }
+
+ action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
+ enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Dirty) {
+ out_msg.Type := CoherenceResponseType:WB_DIRTY;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.Type := CoherenceResponseType:WB_CLEAN;
+ // NOTE: in a real system this would not send data. We send
+ // data here only so we can check it at the memory
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(r_setSharerBit, "r", desc="We saw other sharers") {
+ assert(is_valid(tbe));
+ tbe.Sharers := true;
+ }
+
+ action(s_deallocateTBE, "s", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
+ enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Dirty := tbe.Dirty;
+ if (tbe.Dirty) {
+ out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Data;
+ } else {
+ out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
+ // NOTE: in a real system this would not send data. We send
+ // data here only so we can check it at the memory
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(u_writeDataToCache, "u", desc="Write data to cache") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty;
+ }
+ }
+
+ action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(is_valid(cache_entry));
+ DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
+ cache_entry.DataBlk, in_msg.DataBlk);
+ assert(cache_entry.DataBlk == in_msg.DataBlk);
+ cache_entry.DataBlk := in_msg.DataBlk;
+ cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
+ }
+ }
+
+ action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
+ peek(responseToCache_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
+ tbe.DataBlk, in_msg.DataBlk);
+ assert(tbe.DataBlk == in_msg.DataBlk);
+ tbe.DataBlk := in_msg.DataBlk;
+ tbe.Dirty := in_msg.Dirty || tbe.Dirty;
+ }
+ }
+
+ action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
+ if (L1Dcache.isTagPresent(address)) {
+ L1Dcache.deallocate(address);
+ } else {
+ L1Icache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1Dcache.allocate(address, new Entry));
+ }
+ }
+
+ action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
+ if (is_invalid(cache_entry)) {
+ set_cache_entry(L1Icache.allocate(address, new Entry));
+ }
+ }
+
+ action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
+ set_cache_entry(L2cache.allocate(address, new Entry));
+ }
+
+ action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
+ L2cache.deallocate(address);
+ unset_cache_entry();
+ }
+
+ action(gr_deallocateCacheBlock, "\gr", desc="Deallocate an L1 or L2 cache block.") {
+ if (L1Dcache.isTagPresent(address)) {
+ L1Dcache.deallocate(address);
+ }
+ else if (L1Icache.isTagPresent(address)){
+ L1Icache.deallocate(address);
+ }
+ else {
+ assert(L2cache.isTagPresent(address));
+ L2cache.deallocate(address);
+ }
+ unset_cache_entry();
+ }
+
+ action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
+ if (send_evictions) {
+ DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
+ sequencer.evictionCallback(address);
+ }
+ }
+
+ action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1Dcache.demand_misses;
+ }
+
+ action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
+ ++L1Dcache.demand_hits;
+ }
+
+ action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
+ ++L1Icache.demand_hits;
+ }
+
+ action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+
+  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits") {
+ ++L2cache.demand_hits;
+ }
+
+  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the head of the mandatory queue on this address.") {
+ stall_and_wait(mandatoryQueue_in, address);
+ }
+
+ action(z_stall, "z", desc="stall") {
+ // do nothing and the special z_stall action will return a protocol stall
+ // so that the next port is checked
+ }
+
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
+ action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
+
+ //*****************************************************
+ // TRANSITIONS
+ //*****************************************************
+
+ // Transitions for Load/Store/L2_Replacement from transient states
+ transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({IM, IS, OI, MI, II, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({MI_F, MM_F}, {Store}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({MM_F, MI_F}, {Flush_line}) {
+ zz_stallAndWaitMandatoryQueue;
+ }
+
+ transition({ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
+ z_stall;
+ }
+
+ transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
+ z_stall;
+ }
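+
+  // Two stall flavors are used above: zz_stallAndWaitMandatoryQueue
+  // parks the request until a wakeUp* action replays it, while z_stall
+  // returns a protocol stall so the message stays put and the next port
+  // is checked.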
+
+ // Transitions moving data between the L1 and L2 caches
+ transition({S, O, M, MM}, L1_to_L2) {
+ i_allocateTBE;
+ gg_deallocateL1CacheBlock;
+ vv_allocateL2CacheBlock;
+ hp_copyFromTBEToL2;
+ s_deallocateTBE;
+ }
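+
+  // The block is staged through a TBE because the L1 entry is freed
+  // before the L2 entry is allocated.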
+
+ transition(S, Trigger_L2_to_L1D, ST) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ ii_allocateL1DCacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(O, Trigger_L2_to_L1D, OT) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ ii_allocateL1DCacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(M, Trigger_L2_to_L1D, MT) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ ii_allocateL1DCacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(MM, Trigger_L2_to_L1D, MMT) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ ii_allocateL1DCacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(S, Trigger_L2_to_L1I, ST) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ jj_allocateL1ICacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(O, Trigger_L2_to_L1I, OT) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ jj_allocateL1ICacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(M, Trigger_L2_to_L1I, MT) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ jj_allocateL1ICacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(MM, Trigger_L2_to_L1I, MMT) {
+ i_allocateTBE;
+ rr_deallocateL2CacheBlock;
+ jj_allocateL1ICacheBlock;
+ nb_copyFromTBEToL1;
+ s_deallocateTBE;
+ zz_stallAndWaitMandatoryQueue;
+ ll_L2toL1Transfer;
+ }
+
+ transition(ST, Complete_L2_to_L1, SR) {
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(OT, Complete_L2_to_L1, OR) {
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MT, Complete_L2_to_L1, MR) {
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MMT, Complete_L2_to_L1, MMR) {
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
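+
+  // The L2-to-L1 moves above swap the entry immediately and charge the
+  // l2_cache_hit_latency through the trigger queue; the stalled request
+  // is replayed from the corresponding *R state once Complete_L2_to_L1
+  // arrives.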
+
+ // Transitions from Idle
+ transition({I,IR}, Load, IS) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
+ k_popMandatoryQueue;
+ }
+
+ transition({I,IR}, Ifetch, IS) {
+ jj_allocateL1ICacheBlock;
+ i_allocateTBE;
+ a_issueGETS;
+ uu_profileL1InstMiss;
+ uu_profileL2Miss;
+ k_popMandatoryQueue;
+ }
+
+ transition({I,IR}, Store, IM) {
+ ii_allocateL1DCacheBlock;
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
+ k_popMandatoryQueue;
+ }
+
+ transition({I, IR}, Flush_line, IM_F) {
+ it_allocateTBE;
+ bf_issueGETF;
+ k_popMandatoryQueue;
+ }
+
+ transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
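+
+  // Even an Invalid cache must ack: requests are broadcast to every
+  // cache, and the requester is counting responses.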
+
+ // Transitions from Shared
+ transition({S, SM, ISM}, Load) {
+ h_load_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, ISM}, Ifetch) {
+ h_ifetch_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(SR, Load, S) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(SR, Ifetch, S) {
+ h_ifetch_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition({S,SR}, Store, SM) {
+ i_allocateTBE;
+ b_issueGETX;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SR}, Flush_line, SM_F) {
+ i_allocateTBE;
+ bf_issueGETF;
+ forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ k_popMandatoryQueue;
+ }
+
+ transition(S, L2_Replacement, I) {
+ forward_eviction_to_cpu;
+ rr_deallocateL2CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(S, {Other_GETX, Invalidate}, I) {
+ f_sendAck;
+ forward_eviction_to_cpu;
+ gr_deallocateCacheBlock;
+ l_popForwardQueue;
+ }
+
+ transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
+ ff_sendAckShared;
+ l_popForwardQueue;
+ }
+
+ // Transitions from Owned
+ transition({O, OM, SS, MM_W, M_W}, {Load}) {
+ h_load_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
+ h_ifetch_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(OR, Load, O) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(OR, Ifetch, O) {
+ h_ifetch_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition({O,OR}, Store, OM) {
+ i_allocateTBE;
+ b_issueGETX;
+ p_decrementNumberOfMessagesByOne;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
+ k_popMandatoryQueue;
+ }
+
+ transition({O, OR}, Flush_line, OM_F) {
+ i_allocateTBE;
+ bf_issueGETF;
+ p_decrementNumberOfMessagesByOne;
+ forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ k_popMandatoryQueue;
+ }
+
+ transition(O, L2_Replacement, OI) {
+ i_allocateTBE;
+ d_issuePUT;
+ forward_eviction_to_cpu;
+ rr_deallocateL2CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(O, {Other_GETX, Invalidate}, I) {
+ e_sendData;
+ forward_eviction_to_cpu;
+ gr_deallocateCacheBlock;
+ l_popForwardQueue;
+ }
+
+ transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(O, Merged_GETS) {
+ em_sendDataSharedMultiple;
+ l_popForwardQueue;
+ }
+
+ // Transitions from Modified
+ transition({MM, M}, {Ifetch}) {
+ h_ifetch_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, M}, {Load}) {
+ h_load_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM, Store) {
+ hh_store_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MMR, Load, MM) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MMR, Ifetch, MM) {
+ h_ifetch_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MMR, Store, MM) {
+ hh_store_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition({MM, M, MMR, MR}, Flush_line, MM_F) {
+ i_allocateTBE;
+ bf_issueGETF;
+ p_decrementNumberOfMessagesByOne;
+ forward_eviction_to_cpu;
+ gg_deallocateL1CacheBlock;
+ k_popMandatoryQueue;
+ }
+
+ transition(MM_F, Block_Ack, MI_F) {
+ df_issuePUTF;
+ l_popForwardQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MM, L2_Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUT;
+ forward_eviction_to_cpu;
+ rr_deallocateL2CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MM, {Other_GETX, Invalidate}, I) {
+ c_sendExclusiveData;
+ forward_eviction_to_cpu;
+ gr_deallocateCacheBlock;
+ l_popForwardQueue;
+ }
+
+ transition(MM, Other_GETS, I) {
+ c_sendExclusiveData;
+ forward_eviction_to_cpu;
+ gr_deallocateCacheBlock;
+ l_popForwardQueue;
+ }
+
+ transition(MM, NC_DMA_GETS, O) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(MM, Other_GETS_No_Mig, O) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(MM, Merged_GETS, O) {
+ em_sendDataSharedMultiple;
+ l_popForwardQueue;
+ }
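+
+  // Servicing a GETS from M/MM keeps the dirty block here and downgrades
+  // to Owned, so this cache, not memory, keeps supplying the data.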
+
+ // Transitions from Dirty Exclusive
+ transition(M, Store, MM) {
+ hh_store_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MR, Load, M) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MR, Ifetch, M) {
+ h_ifetch_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MR, Store, MM) {
+ hh_store_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(M, L2_Replacement, MI) {
+ i_allocateTBE;
+ d_issuePUT;
+ forward_eviction_to_cpu;
+ rr_deallocateL2CacheBlock;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(M, {Other_GETX, Invalidate}, I) {
+ c_sendExclusiveData;
+ forward_eviction_to_cpu;
+ gr_deallocateCacheBlock;
+ l_popForwardQueue;
+ }
+
+ transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(M, NC_DMA_GETS, O) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(M, Merged_GETS, O) {
+ em_sendDataSharedMultiple;
+ l_popForwardQueue;
+ }
+
+ // Transitions from IM
+
+ transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition({IM, IM_F, MM_F}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Data, ISM) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IM_F, Data, ISM_F) {
+ uf_writeDataToCacheTBE;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IM, Exclusive_Data, MM_W) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ sx_external_store_hit;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IM_F, Exclusive_Data, MM_WF) {
+ uf_writeDataToCacheTBE;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ // Transitions from SM
+ transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
+ ff_sendAckShared;
+ l_popForwardQueue;
+ }
+
+ transition(SM, {Other_GETX, Invalidate}, IM) {
+ f_sendAck;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
+ f_sendAck;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition({SM, SM_F}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SM, {Data, Exclusive_Data}, ISM) {
+ v_writeDataToCacheVerify;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
+ vt_writeDataToTBEVerify;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ // Transitions from ISM
+ transition({ISM, ISM_F}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(ISM, All_acks_no_sharers, MM) {
+ sxt_trig_ext_store_hit;
+ gm_sendUnblockM;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(ISM_F, All_acks_no_sharers, MI_F) {
+ df_issuePUTF;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from OM
+
+ transition(OM, {Other_GETX, Invalidate}, IM) {
+ e_sendData;
+ pp_incrementNumberOfMessagesByOne;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
+ q_sendDataFromTBEToCache;
+ pp_incrementNumberOfMessagesByOne;
+ forward_eviction_to_cpu;
+ l_popForwardQueue;
+ }
+
+ transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
+ ee_sendDataShared;
+ l_popForwardQueue;
+ }
+
+ transition(OM, Merged_GETS) {
+ em_sendDataSharedMultiple;
+ l_popForwardQueue;
+ }
+
+ transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
+ et_sendDataSharedFromTBE;
+ l_popForwardQueue;
+ }
+
+ transition(OM_F, Merged_GETS) {
+ emt_sendDataSharedMultipleFromTBE;
+ l_popForwardQueue;
+ }
+
+ transition({OM, OM_F}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(OM, {All_acks, All_acks_no_sharers}, MM) {
+ sxt_trig_ext_store_hit;
+ gm_sendUnblockM;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
+ df_issuePUTF;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+  // Transitions from IS
+
+ transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(IS, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(IS, Data, SS) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ hx_external_load_hit;
+ uo_updateCurrentOwner;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Exclusive_Data, M_W) {
+ u_writeDataToCache;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ hx_external_load_hit;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(IS, Shared_Data, SS) {
+ u_writeDataToCache;
+ r_setSharerBit;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ hx_external_load_hit;
+ uo_updateCurrentOwner;
+ n_popResponseQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from SS
+
+ transition(SS, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SS, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(SS, All_acks, S) {
+ gs_sendUnblockS;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(SS, All_acks_no_sharers, S) {
+ // Note: The directory might still be the owner, so that is why we go to S
+ gs_sendUnblockS;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from MM_W
+
+ transition(MM_W, Store) {
+ hh_store_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM_W, MM_WF}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(MM_W, All_acks_no_sharers, MM) {
+ gm_sendUnblockM;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MM_WF, All_acks_no_sharers, MI_F) {
+ df_issuePUTF;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+ // Transitions from M_W
+
+ transition(M_W, Store, MM_W) {
+ hh_store_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(M_W, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(M_W, All_acks_no_sharers, M) {
+ gm_sendUnblockM;
+ s_deallocateTBE;
+ j_popTriggerQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from OI/MI
+
+ transition({OI, MI}, {Other_GETX, Invalidate}, II) {
+ q_sendDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
+ sq_sendSharedDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition({OI, MI}, Merged_GETS, OI) {
+ qm_sendDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition(MI, Writeback_Ack, I) {
+ t_sendExclusiveDataFromTBEToMemory;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MI_F, Writeback_Ack, I) {
+ hh_flush_hit;
+ t_sendExclusiveDataFromTBEToMemory;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(OI, Writeback_Ack, I) {
+ qq_sendDataFromTBEToMemory;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ kd_wakeUpDependents;
+ }
+
+ // Transitions from II
+ transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
+ f_sendAck;
+ l_popForwardQueue;
+ }
+
+ transition(II, Writeback_Ack, I) {
+ g_sendUnblock;
+ s_deallocateTBE;
+ l_popForwardQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(II, Writeback_Nack, I) {
+ s_deallocateTBE;
+ l_popForwardQueue;
+ kd_wakeUpDependents;
+ }
+
+ transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
+ ct_sendExclusiveDataFromTBE;
+ pp_incrementNumberOfMessagesByOne;
+ l_popForwardQueue;
+ }
+
+ transition(MM_F, Other_GETS, IM_F) {
+ ct_sendExclusiveDataFromTBE;
+ pp_incrementNumberOfMessagesByOne;
+ l_popForwardQueue;
+ }
+
+ transition(MM_F, NC_DMA_GETS, OM_F) {
+ sq_sendSharedDataFromTBEToCache;
+ l_popForwardQueue;
+ }
+
+ transition(MM_F, Other_GETS_No_Mig, OM_F) {
+ et_sendDataSharedFromTBE;
+ l_popForwardQueue;
+ }
+
+ transition(MM_F, Merged_GETS, OM_F) {
+ emt_sendDataSharedMultipleFromTBE;
+ l_popForwardQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * AMD's contributions to the MOESI hammer protocol do not constitute an
+ * endorsement of its similarity to any AMD products.
+ *
+ * Authors: Milo Martin
+ * Brad Beckmann
+ */
+
+machine(MachineType:Directory, "AMD Hammer-like protocol")
+ : DirectoryMemory * directory;
+ CacheMemory * probeFilter;
+ Cycles from_memory_controller_latency := 2;
+ Cycles to_memory_controller_latency := 1;
+ bool probe_filter_enabled := "False";
+ bool full_bit_dir_enabled := "False";
+
+ MessageBuffer * forwardFromDir, network="To", virtual_network="3",
+ vnet_type="forward";
+
+ MessageBuffer * responseFromDir, network="To", virtual_network="4",
+ vnet_type="response";
+
+ // For a finite buffered network, note that the DMA response network only
+ // works on this relatively low-numbered (lower priority) virtual network
+ // because the trigger queue decouples cache responses from DMA responses.
+ MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
+ vnet_type="response";
+
+ MessageBuffer * unblockToDir, network="From", virtual_network="5",
+ vnet_type="unblock";
+
+ MessageBuffer * responseToDir, network="From", virtual_network="4",
+ vnet_type="response";
+
+ MessageBuffer * requestToDir, network="From", virtual_network="2",
+ vnet_type="request";
+
+ MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
+ vnet_type="request";
+
+ MessageBuffer * triggerQueue;
+ MessageBuffer * responseFromMemory;
+{
+ // STATES
+ state_declaration(State, desc="Directory states", default="Directory_State_E") {
+ // Base states
+ NX, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in O at Owner";
+ NO, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
+ S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
+ O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
+ E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
+
+ O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
+ S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
+ NO_R, AccessPermission:Busy, desc="Was Not Owner or Sharer, replacing probe filter entry";
+
+ NO_B, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked";
+ NO_B_X, AccessPermission:Busy, "NO^B^X", desc="Not Owner, Blocked, next queued request GETX";
+ NO_B_S, AccessPermission:Busy, "NO^B^S", desc="Not Owner, Blocked, next queued request GETS";
+ NO_B_S_W, AccessPermission:Busy, "NO^B^S^W", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
+ O_B, AccessPermission:Busy, "O^B", desc="Owner, Blocked";
+ NO_B_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
+ O_B_W, AccessPermission:Busy, desc="Owner, Blocked, waiting for Dram";
+ NO_W, AccessPermission:Busy, desc="Not Owner, waiting for Dram";
+ O_W, AccessPermission:Busy, desc="Owner, waiting for Dram";
+ NO_DW_B_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram and cache responses";
+ NO_DR_B_W, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for Dram and cache responses";
+ NO_DR_B_D, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
+ NO_DR_B, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses";
+ NO_DW_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram";
+ O_DR_B_W, AccessPermission:Busy, desc="Owner, Dma Read waiting for Dram and cache responses";
+ O_DR_B, AccessPermission:Busy, desc="Owner, Dma Read waiting for cache responses";
+ WB, AccessPermission:Busy, desc="Blocked on a writeback";
+ WB_O_W, AccessPermission:Busy, desc="Blocked on memory write, will go to O";
+ WB_E_W, AccessPermission:Busy, desc="Blocked on memory write, will go to E";
+
+ NO_F, AccessPermission:Busy, desc="Blocked on a flush";
+ NO_F_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
+ }
+
+ // Events
+ enumeration(Event, desc="Directory events") {
+ GETX, desc="A GETX arrives";
+ GETS, desc="A GETS arrives";
+ PUT, desc="A PUT arrives";
+ Unblock, desc="An unblock message arrives";
+ UnblockS, desc="An unblock message arrives, responder has a shared copy";
+ UnblockM, desc="An unblock message arrives, responder has an exclusive copy";
+ Writeback_Clean, desc="The final part of a PutX (no data)";
+ Writeback_Dirty, desc="The final part of a PutX (data)";
+ Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
+ Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
+
+ // Probe filter
+ Pf_Replacement, desc="probe filter replacement";
+
+ // DMA requests
+ DMA_READ, desc="A DMA Read memory request";
+ DMA_WRITE, desc="A DMA Write memory request";
+
+ // Memory Controller
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+
+ // Cache responses required to handle DMA
+ Ack, desc="Received an ack message";
+ Shared_Ack, desc="Received an ack message, responder has a shared copy";
+ Shared_Data, desc="Received a data message, responder has a shared copy";
+ Data, desc="Received a data message, responder had an owned or exclusive copy and gave it to us";
+ Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
+
+ // Triggers
+ All_acks_and_shared_data, desc="Received shared data and message acks";
+ All_acks_and_owner_data, desc="Received owner data and message acks";
+ All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
+ All_Unblocks, desc="Received all unblocks for a merged gets request";
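+
+ // Flush requests (these arrive on the request queue, not the trigger queue)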
+ GETF, desc="A GETF arrives";
+ PUTF, desc="A PUTF arrives";
+ }
+
+ // TYPES
+
+ // DirectoryEntry
+ structure(Entry, desc="...", interface="AbstractEntry") {
+ State DirectoryState, desc="Directory state";
+ }
+
+ // ProbeFilterEntry
+ structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
+ State PfState, desc="Directory state";
+ MachineID Owner, desc="Owner node";
+ Set Sharers, desc="sharing vector for full bit directory";
+ }
+
+ // TBE entries for DMA requests
+ structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ Addr PhysicalAddress, desc="physical address";
+ State TBEState, desc="Transient State";
+ CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
+ int Acks, default="0", desc="The number of acks that the waiting response represents";
+ int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
+ DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
+ DataBlock DataBlk, desc="The current view of system memory";
+ int Len, desc="...";
+ MachineID DmaRequestor, desc="DMA requestor";
+ NetDest GetSRequestors, desc="GETS merged requestors";
+ int NumPendingMsgs, desc="Number of pending acks/messages";
+ bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
+ bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
+ bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
+ }
+
+ structure(TBETable, external="yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ Tick clockEdge();
+ void set_cache_entry(AbstractCacheEntry b);
+ void unset_cache_entry();
+ void set_tbe(TBE a);
+ void unset_tbe();
+ void wakeUpBuffers(Addr a);
+ Cycles curCycle();
+
+ // ** OBJECTS **
+
+ Set fwd_set;
+
+ TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+
+ Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+
+ if (is_valid(dir_entry)) {
+ return dir_entry;
+ }
+
+ dir_entry := static_cast(Entry, "pointer",
+ directory.allocate(addr, new Entry));
+ return dir_entry;
+ }
+
+ PfEntry getProbeFilterEntry(Addr addr), return_by_pointer="yes" {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
+ return pfEntry;
+ }
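+ // OOD ("out of domain") is SLICC's null pointer value; callers must
+ // check the result with is_valid before using it.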
+ return OOD;
+ }
+
+ State getState(TBE tbe, PfEntry pf_entry, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ if (is_valid(pf_entry)) {
+ assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
+ }
+ }
+ return getDirectoryEntry(addr).DirectoryState;
+ }
+ }
+
+ void setState(TBE tbe, PfEntry pf_entry, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ if (is_valid(pf_entry)) {
+ pf_entry.PfState := state;
+ }
+ if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
+ assert(is_valid(pf_entry));
+ }
+ if (state == State:E) {
+ assert(is_valid(pf_entry) == false);
+ }
+ }
+ if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
+ state == State:O) {
+ assert(is_valid(tbe) == false);
+ }
+ getDirectoryEntry(addr).DirectoryState := state;
+ }
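+
+ // The asserts above encode the probe filter invariants: when the probe
+ // filter is enabled, an entry must exist in NX/NO/S/O and must not exist
+ // in E, and no stable state may have a live TBE.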
+
+ AccessPermission getAccessPermission(Addr addr) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ return Directory_State_to_permission(tbe.TBEState);
+ }
+
+ if(directory.isPresent(addr)) {
+ return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
+ }
+
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(PfEntry pf_entry, Addr addr, State state) {
+ getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ testAndRead(addr, tbe.DataBlk, pkt);
+ } else {
+ functionalMemoryRead(pkt);
+ }
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
+ TBE tbe := TBEs[addr];
+ if(is_valid(tbe)) {
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
+ }
+
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
+ }
+
+ Event cache_request_to_event(CoherenceRequestType type) {
+ if (type == CoherenceRequestType:GETS) {
+ return Event:GETS;
+ } else if (type == CoherenceRequestType:GETX) {
+ return Event:GETX;
+ } else if (type == CoherenceRequestType:GETF) {
+ return Event:GETF;
+ } else {
+ error("Invalid CoherenceRequestType");
+ }
+ }
+
+ // ** OUT_PORTS **
+ out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
+ out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
+ out_port(responseNetwork_out, ResponseMsg, responseFromDir);
+ out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue);
+
+ // ** IN_PORTS **
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
+ if (triggerQueue_in.isReady(clockEdge())) {
+ peek(triggerQueue_in, TriggerMsg) {
+ PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_acks_and_owner_data, in_msg.addr,
+ pf_entry, tbe);
+ } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
+ trigger(Event:All_acks_and_shared_data, in_msg.addr,
+ pf_entry, tbe);
+ } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
+ trigger(Event:All_acks_and_data_no_sharers, in_msg.addr,
+ pf_entry, tbe);
+ } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
+ trigger(Event:All_Unblocks, in_msg.addr,
+ pf_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
+ peek(unblockNetwork_in, ResponseMsg) {
+ PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
+ trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
+ trigger(Event:UnblockS, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
+ trigger(Event:UnblockM, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
+ trigger(Event:Writeback_Clean, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
+ trigger(Event:Writeback_Dirty, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
+ trigger(Event:Writeback_Exclusive_Clean, in_msg.addr,
+ pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
+ trigger(Event:Writeback_Exclusive_Dirty, in_msg.addr,
+ pf_entry, tbe);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Response Network
+ in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
+ if (responseToDir_in.isReady(clockEdge())) {
+ peek(responseToDir_in, ResponseMsg) {
+ PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
+ trigger(Event:Shared_Ack, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
+ trigger(Event:Shared_Data, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA) {
+ trigger(Event:Data, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Exclusive_Data, in_msg.addr, pf_entry, tbe);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
+ if (memQueue_in.isReady(clockEdge())) {
+ peek(memQueue_in, MemoryMsg) {
+ PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.addr, pf_entry, tbe);
+ } else {
+ DPRINTF(RubySlicc, "%d\n", in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
+ if (requestQueue_in.isReady(clockEdge())) {
+ peek(requestQueue_in, RequestMsg) {
+ PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
+ if (in_msg.Type == CoherenceRequestType:PUT) {
+ trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
+ } else if (in_msg.Type == CoherenceRequestType:PUTF) {
+ trigger(Event:PUTF, in_msg.addr, pf_entry, tbe);
+ } else {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ if (is_valid(pf_entry)) {
+ trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
+ pf_entry, tbe);
+ } else {
+ if (probeFilter.cacheAvail(in_msg.addr)) {
+ trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
+ pf_entry, tbe);
+ } else {
+ trigger(Event:Pf_Replacement,
+ probeFilter.cacheProbe(in_msg.addr),
+ getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
+ TBEs[probeFilter.cacheProbe(in_msg.addr)]);
+ }
+ }
+ } else {
+ trigger(cache_request_to_event(in_msg.Type), in_msg.addr,
+ pf_entry, tbe);
+ }
+ }
+ }
+ }
+ }
+
+ in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
+ if (in_msg.Type == DMARequestType:READ) {
+ trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
+ } else if (in_msg.Type == DMARequestType:WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // Actions
+
+ action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ assert(is_valid(cache_entry));
+ probeFilter.setMRU(address);
+ }
+ }
+
+ action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ assert(is_valid(cache_entry));
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(cache_entry.Owner != in_msg.Sender);
+ if (full_bit_dir_enabled) {
+ assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
+ }
+ }
+ }
+ }
+
+ action(uo_updateOwnerIfPf, "uo", desc="update owner") {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ assert(is_valid(cache_entry));
+ peek(unblockNetwork_in, ResponseMsg) {
+ cache_entry.Owner := in_msg.Sender;
+ if (full_bit_dir_enabled) {
+ cache_entry.Sharers.clear();
+ cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
+ APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
+ DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
+ }
+ }
+ }
+ }
+
+ action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
+ if (full_bit_dir_enabled) {
+ assert(probeFilter.isTagPresent(address));
+ peek(unblockNetwork_in, ResponseMsg) {
+ cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
+ }
+ }
+ }
+
+ action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WB_ACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
+ peek(requestQueue_in, RequestMsg) {
+ if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:BLOCK_ACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+ }
+
+ action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:WB_NACK;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ peek(requestQueue_in, RequestMsg) {
+ set_cache_entry(probeFilter.allocate(address, new PfEntry));
+ cache_entry.Owner := in_msg.Requestor;
+ cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
+ }
+ }
+ }
+
+ action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ probeFilter.deallocate(address);
+ unset_cache_entry();
+ }
+ }
+
+ action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if present") {
+ if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
+ probeFilter.deallocate(address);
+ unset_cache_entry();
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE") {
+ check_allocate(TBEs);
+ peek(requestQueue_in, RequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.PhysicalAddress := address;
+ tbe.ResponseType := CoherenceResponseType:NULL;
+ }
+ }
+
+ action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
+ check_allocate(TBEs);
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ tbe.DmaDataBlk := in_msg.DataBlk;
+ tbe.PhysicalAddress := in_msg.PhysicalAddress;
+ tbe.Len := in_msg.Len;
+ tbe.DmaRequestor := in_msg.Requestor;
+ tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
+ //
+ // One ack for each last-level cache
+ //
+ tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
+ //
+ // Assume initially that the caches store a clean copy and that memory
+ // will provide the data
+ //
+ tbe.CacheDirty := false;
+ }
+ }
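+
+ // Worked example (hypothetical system with four L1 caches): machineCount
+ // returns 4, so the DMA TBE above starts with NumPendingMsgs := 4 and
+ // CacheDirty := false; each cache ack or data response then decrements the
+ // count, and completion triggers only once it reaches zero.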
+
+ action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
+ assert(is_valid(tbe));
+ if (full_bit_dir_enabled) {
+ assert(is_valid(cache_entry));
+ tbe.NumPendingMsgs := cache_entry.Sharers.count();
+ } else {
+ tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
+ }
+ }
+
+ action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
+ assert(is_valid(tbe));
+ tbe.NumPendingMsgs := 1;
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
+ assert(is_valid(tbe));
+ peek(requestQueue_in, RequestMsg) {
+ if (full_bit_dir_enabled) {
+ assert(is_valid(cache_entry));
+ //
+ // If we are using the full-bit directory and no sharers exists beyond
+ // the requestor, then we must set the ack number to all, not one
+ //
+ fwd_set := cache_entry.Sharers;
+ fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
+ if (fwd_set.count() > 0) {
+ tbe.Acks := 1;
+ tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
+ tbe.SilentAcks := tbe.SilentAcks - 1;
+ } else {
+ tbe.Acks := machineCount(MachineType:L1Cache);
+ tbe.SilentAcks := 0;
+ }
+ } else {
+ tbe.Acks := 1;
+ }
+ }
+ }
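+
+ // Sketch of the full-bit ack arithmetic above, assuming a hypothetical
+ // 8-cache system where only the requestor and one other sharer hold the
+ // block: fwd_set.count() == 1, so Acks := 1 and
+ // SilentAcks := 8 - 1 - 1 == 6, accounting for the six caches that are
+ // never probed.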
+
+ action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
+ assert(is_valid(tbe));
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ tbe.Acks := machineCount(MachineType:L1Cache);
+ tbe.SilentAcks := 0;
+ } else {
+ tbe.Acks := 1;
+ }
+ }
+
+ action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
+ peek(responseToDir_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ assert(in_msg.Acks > 0);
+ DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
+ //
+ // Note that cache data responses will have an ack count of 2. However,
+ // directory DMA requests must wait for acks from all LLC caches, so
+ // only decrement by 1.
+ //
+ if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
+ (in_msg.Type == CoherenceResponseType:DATA) ||
+ (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
+ } else {
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
+ }
+ DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
+ }
+ }
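+
+ // Example of the note above: a DATA_SHARED response arrives carrying
+ // in_msg.Acks == 2, but because a directory DMA request must collect an
+ // ack from every cache, NumPendingMsgs drops by exactly 1.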
+
+ action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
+ DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
+ tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
+ DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
+ }
+ }
+
+ action(n_popResponseQueue, "n", desc="Pop response queue") {
+ responseToDir_in.dequeue(clockEdge());
+ }
+
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ if (tbe.Sharers) {
+ if (tbe.Owned) {
+ out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
+ } else {
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ } else {
+ out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
+ }
+ }
+ }
+ }
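+
+ // Summary of the trigger selection above:
+ // Sharers == false -> ALL_ACKS_NO_SHARERS
+ // Sharers == true, Owned == false -> ALL_ACKS
+ // Sharers == true, Owned == true -> ALL_ACKS_OWNER_EXISTS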
+
+ action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:ALL_UNBLOCKS;
+ }
+ }
+ }
+
+ action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
+ assert(is_valid(tbe));
+ tbe.NumPendingMsgs := tbe.GetSRequestors.count();
+ }
+
+ action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ assert(is_valid(tbe));
+ tbe.NumPendingMsgs := 0;
+ }
+ }
+
+ action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs == 0) {
+ assert(probe_filter_enabled || full_bit_dir_enabled);
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.addr := address;
+ out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
+ }
+ }
+ }
+
+ action(d_sendData, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := tbe.ResponseType;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Acks := tbe.Acks;
+ out_msg.SilentAcks := tbe.SilentAcks;
+ DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
+ assert(out_msg.Acks > 0);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ //
+ // we send the entire data block and rely on the dma controller to
+ // split it up if need be
+ //
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
+ peek(triggerQueue_in, TriggerMsg) {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:DATA;
+ //
+ // we send the entire data block and rely on the dma controller to
+ // split it up if need be
+ //
+ out_msg.DataBlk := tbe.DataBlk;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Response_Data;
+ }
+ }
+ }
+
+ action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
+ assert(is_valid(tbe));
+ out_msg.PhysicalAddress := address;
+ out_msg.LineAddress := address;
+ out_msg.Type := DMAResponseType:ACK;
+ out_msg.Destination.add(tbe.DmaRequestor);
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
+ peek(requestQueue_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
+ }
+ }
+
+ action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
+ peek(requestQueue_in, RequestMsg) {
+ assert(is_valid(tbe));
+ if (full_bit_dir_enabled) {
+ fwd_set := cache_entry.Sharers;
+ fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
+ if (fwd_set.count() > 0) {
+ tbe.ResponseType := CoherenceResponseType:DATA;
+ } else {
+ tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
+ }
+ } else {
+ tbe.ResponseType := CoherenceResponseType:DATA;
+ }
+ }
+ }
+
+ action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
+ peek(requestQueue_in, RequestMsg) {
+ assert(is_valid(tbe));
+ tbe.GetSRequestors.add(in_msg.Requestor);
+ }
+ }
+
+ action(r_setSharerBit, "r", desc="We saw other sharers") {
+ assert(is_valid(tbe));
+ tbe.Sharers := true;
+ }
+
+ action(so_setOwnerBit, "so", desc="We saw the owner") {
+ assert(is_valid(tbe));
+ tbe.Sharers := true;
+ tbe.Owned := true;
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+ action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
+ }
+ }
+
+ action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
+ assert(is_valid(tbe));
+ if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
+ if (full_bit_dir_enabled) {
+ assert(is_valid(cache_entry));
+ peek(requestQueue_in, RequestMsg) {
+ fwd_set := cache_entry.Sharers;
+ fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
+ if (fwd_set.count() > 0) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
+ out_msg.MessageSize := MessageSizeType:Multicast_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ assert(tbe.SilentAcks > 0);
+ out_msg.SilentAcks := tbe.SilentAcks;
+ }
+ }
+ }
+ } else {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
+ out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ }
+ }
+ }
+
+ action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
+ if (machineCount(MachineType:L1Cache) > 1) {
+ if (full_bit_dir_enabled) {
+ assert(cache_entry.Sharers.count() > 0);
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
+ out_msg.MessageSize := MessageSizeType:Multicast_Control;
+ }
+ }
+ } else {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ }
+ }
+ }
+ }
+
+ action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
+ if (machineCount(MachineType:L1Cache) > 1) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:INV;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.add(cache_entry.Owner);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.DirectedProbe := true;
+ }
+ }
+ }
+
+ action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
+ if (machineCount(MachineType:L1Cache) > 1) {
+ peek(requestQueue_in, RequestMsg) {
+ if (full_bit_dir_enabled) {
+ fwd_set := cache_entry.Sharers;
+ fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
+ if (fwd_set.count() > 0) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
+ out_msg.MessageSize := MessageSizeType:Multicast_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
+ out_msg.SilentAcks := out_msg.SilentAcks - 1;
+ }
+ }
+ } else {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
+ out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ }
+ } else {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, 1) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceResponseType:ACK;
+ out_msg.Sender := machineID;
+ out_msg.Destination.add(in_msg.Requestor);
+ out_msg.Dirty := false; // By definition, the block is now clean
+ out_msg.Acks := 0;
+ out_msg.SilentAcks := 0;
+ DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
+ out_msg.MessageSize := MessageSizeType:Response_Control;
+ }
+ }
+ }
+ }
+
+ action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
+ assert(machineCount(MachineType:L1Cache) > 1);
+ //
+ // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
+ // decouple the two.
+ //
+ peek(unblockNetwork_in, ResponseMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ assert(is_valid(tbe));
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:MERGED_GETS;
+ out_msg.MergedRequestors := tbe.GetSRequestors;
+ if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
+ out_msg.Destination.add(in_msg.CurOwner);
+ } else {
+ out_msg.Destination.add(in_msg.Sender);
+ }
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.InitialRequestTime := zero_time();
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ }
+
+ action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
+ assert(machineCount(MachineType:L1Cache) > 1);
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(cache_entry.Owner);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.DirectedProbe := true;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ } else {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
+ out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ }
+ }
+
+ action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
+ if (machineCount(MachineType:L1Cache) > 1) {
+
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ peek(requestQueue_in, RequestMsg) {
+ if (in_msg.Requestor != cache_entry.Owner) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ assert(is_valid(cache_entry));
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.add(cache_entry.Owner);
+ out_msg.MessageSize := MessageSizeType:Request_Control;
+ out_msg.DirectedProbe := true;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ }
+ } else {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := in_msg.Requestor;
+ out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
+ out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ out_msg.InitialRequestTime := in_msg.InitialRequestTime;
+ out_msg.ForwardRequestTime := curCycle();
+ }
+ }
+ }
+ }
+ }
+
+ action(f_forwardWriteFromDma, "fw", desc="Broadcast GETX on behalf of a DMA write") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs > 0) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETX;
+ //
+ // Send to all L1 caches, since the requestor is the memory controller
+ // itself
+ //
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ }
+ }
+ }
+ }
+
+ action(f_forwardReadFromDma, "fr", desc="Broadcast GETS on behalf of a DMA read") {
+ assert(is_valid(tbe));
+ if (tbe.NumPendingMsgs > 0) {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
+ out_msg.addr := address;
+ out_msg.Type := CoherenceRequestType:GETS;
+ //
+ // Send to all L1 caches, since the requestor is the memory controller
+ // itself
+ //
+ out_msg.Requestor := machineID;
+ out_msg.Destination.broadcast(MachineType:L1Cache);
+ out_msg.MessageSize := MessageSizeType:Broadcast_Control;
+ }
+ }
+ }
+ }
+
+ action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
+ requestQueue_in.dequeue(clockEdge());
+ }
+
+ action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Sender);
+ }
+ unblockNetwork_in.dequeue(clockEdge());
+ }
+
+ action(k_wakeUpDependents, "k", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
+ action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue(clockEdge());
+ }
+
+ action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
+ triggerQueue_in.dequeue(clockEdge());
+ }
+
+ action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+ action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait on the dma request queue") {
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
+ stall_and_wait(dmaRequestQueue_in, address);
+ }
+
+ action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
+ peek(memQueue_in, MemoryMsg) {
+ assert(is_valid(tbe));
+ if (tbe.CacheDirty == false) {
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+ }
+
+ action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
+ peek(responseToDir_in, ResponseMsg) {
+ assert(is_valid(tbe));
+ tbe.CacheDirty := true;
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
+ assert(is_valid(tbe));
+ assert(tbe.CacheDirty);
+ }
+
+ action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
+ if (probe_filter_enabled || full_bit_dir_enabled) {
+ peek(requestQueue_in, RequestMsg) {
+ assert(is_valid(cache_entry));
+ assert(cache_entry.Owner != in_msg.Requestor);
+ }
+ }
+ }
+
+ action(ans_assertNotSharer, "ans", desc="Assert that request is not a current sharer") {
+ if (full_bit_dir_enabled) {
+ peek(requestQueue_in, RequestMsg) {
+ assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
+ }
+ }
+ }
+
+ action(rs_removeSharer, "s", desc="remove current sharer") {
+ if (full_bit_dir_enabled) {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
+ cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
+ }
+ }
+ }
+
+ action(cs_clearSharers, "cs", desc="clear current sharers") {
+ if (full_bit_dir_enabled) {
+ peek(requestQueue_in, RequestMsg) {
+ cache_entry.Sharers.clear();
+ cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
+ }
+ }
+ }
+
+ action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ }
+ }
+
+ action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
+ assert(is_valid(tbe));
+ queueMemoryWritePartial(tbe.DmaRequestor, tbe.PhysicalAddress,
+ to_memory_controller_latency, tbe.DmaDataBlk,
+ tbe.Len);
+ }
+
+ action(ly_queueMemoryWriteFromTBE, "ly", desc="Write data to memory from TBE") {
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ tbe.DataBlk);
+ }
+
+ action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ assert(in_msg.Dirty == false);
+ assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+ DPRINTF(RubySlicc, "%s\n", in_msg.DataBlk);
+ }
+ }
+
+ action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") {
+ peek(requestQueue_in, RequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
+ stall_and_wait(requestQueue_in, address);
+ }
+
+ // TRANSITIONS
+
+ // Transitions out of E state
+ transition(E, GETX, NO_B_W) {
+ pfa_probeFilterAllocate;
+ v_allocateTBE;
+ rx_recordExclusiveInTBE;
+ saa_setAcksToAllIfPF;
+ qf_queueMemoryFetchRequest;
+ fn_forwardRequestIfNecessary;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(E, GETF, NO_F_W) {
+ pfa_probeFilterAllocate;
+ v_allocateTBE;
+ rx_recordExclusiveInTBE;
+ saa_setAcksToAllIfPF;
+ qf_queueMemoryFetchRequest;
+ fn_forwardRequestIfNecessary;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(E, GETS, NO_B_W) {
+ pfa_probeFilterAllocate;
+ v_allocateTBE;
+ rx_recordExclusiveInTBE;
+ saa_setAcksToAllIfPF;
+ qf_queueMemoryFetchRequest;
+ fn_forwardRequestIfNecessary;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(E, DMA_READ, NO_DR_B_W) {
+ vd_allocateDmaRequestInTBE;
+ qd_queueMemoryRequestFromDmaRead;
+ spa_setPendingAcksToZeroIfPF;
+ f_forwardReadFromDma;
+ p_popDmaRequestQueue;
+ }
+
+ transition(E, DMA_WRITE, NO_DW_B_W) {
+ vd_allocateDmaRequestInTBE;
+ spa_setPendingAcksToZeroIfPF;
+ sc_signalCompletionIfPF;
+ f_forwardWriteFromDma;
+ p_popDmaRequestQueue;
+ }
+
+ // Transitions out of O state
+ transition(O, GETX, NO_B_W) {
+ r_setMRU;
+ v_allocateTBE;
+ r_recordDataInTBE;
+ sa_setAcksToOne;
+ qf_queueMemoryFetchRequest;
+ fb_forwardRequestBcast;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, GETF, NO_F_W) {
+ r_setMRU;
+ v_allocateTBE;
+ r_recordDataInTBE;
+ sa_setAcksToOne;
+ qf_queueMemoryFetchRequest;
+ fb_forwardRequestBcast;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ // This transition is dumb: if a shared copy exists on-chip, it should
+ // provide the data rather than slow off-chip dram. The problem is that
+ // the current caches don't provide data in the S state.
+ transition(O, GETS, O_B_W) {
+ r_setMRU;
+ v_allocateTBE;
+ r_recordDataInTBE;
+ saa_setAcksToAllIfPF;
+ qf_queueMemoryFetchRequest;
+ fn_forwardRequestIfNecessary;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(O, DMA_READ, O_DR_B_W) {
+ vd_allocateDmaRequestInTBE;
+ spa_setPendingAcksToZeroIfPF;
+ qd_queueMemoryRequestFromDmaRead;
+ f_forwardReadFromDma;
+ p_popDmaRequestQueue;
+ }
+
+ transition(O, Pf_Replacement, O_R) {
+ v_allocateTBE;
+ pa_setPendingMsgsToAll;
+ ia_invalidateAllRequest;
+ pfd_probeFilterDeallocate;
+ }
+
+ transition(S, Pf_Replacement, S_R) {
+ v_allocateTBE;
+ pa_setPendingMsgsToAll;
+ ia_invalidateAllRequest;
+ pfd_probeFilterDeallocate;
+ }
+
+ transition(NO, Pf_Replacement, NO_R) {
+ v_allocateTBE;
+ po_setPendingMsgsToOne;
+ io_invalidateOwnerRequest;
+ pfd_probeFilterDeallocate;
+ }
+
+ transition(NX, Pf_Replacement, NO_R) {
+ v_allocateTBE;
+ pa_setPendingMsgsToAll;
+ ia_invalidateAllRequest;
+ pfd_probeFilterDeallocate;
+ }
+
+ transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
+ vd_allocateDmaRequestInTBE;
+ f_forwardWriteFromDma;
+ p_popDmaRequestQueue;
+ }
+
+ // Transitions out of NX state
+ transition(NX, GETX, NO_B) {
+ r_setMRU;
+ fb_forwardRequestBcast;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NX, GETF, NO_F) {
+ r_setMRU;
+ fb_forwardRequestBcast;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ // Transitions out of NO state
+ transition(NO, GETX, NO_B) {
+ r_setMRU;
+ ano_assertNotOwner;
+ fc_forwardRequestConditionalOwner;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NO, GETF, NO_F) {
+ r_setMRU;
+ //ano_assertNotOwner;
+ nofc_forwardRequestConditionalOwner; // forward request if the requestor is not the owner
+ cs_clearSharers;
+ oc_sendBlockAck; // send ack if the owner
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, GETX, NO_B) {
+ r_setMRU;
+ fb_forwardRequestBcast;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, GETF, NO_F) {
+ r_setMRU;
+ fb_forwardRequestBcast;
+ cs_clearSharers;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, GETS, NO_B) {
+ r_setMRU;
+ ano_assertNotOwner;
+ fb_forwardRequestBcast;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NO, GETS, NO_B) {
+ r_setMRU;
+ ano_assertNotOwner;
+ ans_assertNotSharer;
+ fc_forwardRequestConditionalOwner;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NX, GETS, NO_B) {
+ r_setMRU;
+ ano_assertNotOwner;
+ fc_forwardRequestConditionalOwner;
+ i_popIncomingRequestQueue;
+ }
+
+ transition({NO, NX, S}, PUT, WB) {
+ //
+ // note that the PUT requestor may not be the current owner if an invalidate
+ // raced with PUT
+ //
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
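+
+ // Hypothetical illustration of the race noted above: the requestor issued
+ // its PUT while it still believed it owned the block, but a racing
+ // invalidation has already moved the directory to NO/NX/S; the directory
+ // still acks the writeback and resolves the data in the WB state.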
+
+ transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
+ vd_allocateDmaRequestInTBE;
+ f_forwardReadFromDma;
+ p_popDmaRequestQueue;
+ }
+
+ // Nack PUT requests when races cause us to believe we own the data
+ transition({O, E}, PUT) {
+ b_sendWriteBackNack;
+ i_popIncomingRequestQueue;
+ }
+
+ // Blocked transient states
+ transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
+ NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
+ NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
+ {GETS, GETX, GETF, PUT, Pf_Replacement}) {
+ z_stallAndWaitRequest;
+ }
+
+ transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}){
+ z_stallAndWaitRequest;
+ }
+
+ transition(NO_B, {GETX, GETF}, NO_B_X) {
+ z_stallAndWaitRequest;
+ }
+
+ transition(NO_B, {PUT, Pf_Replacement}) {
+ z_stallAndWaitRequest;
+ }
+
+ transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
+ z_stallAndWaitRequest;
+ }
+
+ transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
+ NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
+ NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
+ {DMA_READ, DMA_WRITE}) {
+ zd_stallAndWaitDMARequest;
+ }
+
+ // merge GETS into one response
+ transition(NO_B, GETS, NO_B_S) {
+ v_allocateTBE;
+ rs_recordGetSRequestor;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(NO_B_S, GETS) {
+ rs_recordGetSRequestor;
+ i_popIncomingRequestQueue;
+ }
+
+ // unblock responses
+ transition({NO_B, NO_B_X}, UnblockS, NX) {
+ us_updateSharerIfFBD;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition({NO_B, NO_B_X}, UnblockM, NO) {
+ uo_updateOwnerIfPf;
+ us_updateSharerIfFBD;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_B_S, UnblockS, NO_B_S_W) {
+ us_updateSharerIfFBD;
+ fr_forwardMergeReadRequestsToOwner;
+ sp_setPendingMsgsToMergedSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_B_S, UnblockM, NO_B_S_W) {
+ uo_updateOwnerIfPf;
+ fr_forwardMergeReadRequestsToOwner;
+ sp_setPendingMsgsToMergedSharers;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_B_S_W, UnblockS) {
+ us_updateSharerIfFBD;
+ mu_decrementNumberOfUnblocks;
+ os_checkForMergedGetSCompletion;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_B_S_W, All_Unblocks, NX) {
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(O_B, UnblockS, O) {
+ us_updateSharerIfFBD;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(O_B, UnblockM, NO) {
+ us_updateSharerIfFBD;
+ uo_updateOwnerIfPf;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_B_W, Memory_Data, NO_B) {
+ d_sendData;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(NO_F_W, Memory_Data, NO_F) {
+ d_sendData;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
+ r_recordMemoryData;
+ o_checkForCompletion;
+ l_popMemQueue;
+ }
+
+ transition(O_DR_B_W, Memory_Data, O_DR_B) {
+ r_recordMemoryData;
+ dr_sendDmaData;
+ o_checkForCompletion;
+ l_popMemQueue;
+ }
+
+ transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition({O_R, S_R, NO_R}, Ack) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(S_R, Data) {
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(NO_R, {Data, Exclusive_Data}) {
+ r_recordCacheData;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition({O_R, S_R}, All_acks_and_data_no_sharers, E) {
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_R, All_acks_and_data_no_sharers, WB_E_W) {
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition({NO_DR_B_W, O_DR_B_W}, Ack) {
+ m_decrementNumberOfMessages;
+ n_popResponseQueue;
+ }
+
+ transition(NO_DR_B_W, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ n_popResponseQueue;
+ }
+
+ transition(O_DR_B, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(O_DR_B_W, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ n_popResponseQueue;
+ }
+
+ transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
+ m_decrementNumberOfMessages;
+ r_setSharerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(NO_DR_B_W, Shared_Data) {
+ r_recordCacheData;
+ m_decrementNumberOfMessages;
+ so_setOwnerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
+ r_recordCacheData;
+ m_decrementNumberOfMessages;
+ so_setOwnerBit;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(NO_DR_B_W, {Exclusive_Data, Data}) {
+ r_recordCacheData;
+ m_decrementNumberOfMessages;
+ n_popResponseQueue;
+ }
+
+ transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
+ r_recordCacheData;
+ m_decrementNumberOfMessages;
+ o_checkForCompletion;
+ n_popResponseQueue;
+ }
+
+ transition(NO_DR_B, All_acks_and_owner_data, WB_O_W) {
+ //
+ // Note that the DMA consistency model allows us to send the DMA device
+ // a response as soon as we receive valid data and prior to receiving
+ // all acks. However, to simplify the protocol we wait for all acks.
+ //
+ dt_sendDmaDataFromTbe;
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DR_B, All_acks_and_shared_data, S) {
+ //
+ // Note that the DMA consistency model allows us to send the DMA device
+ // a response as soon as we receive valid data and prior to receiving
+ // all acks. However, to simplify the protocol we wait for all acks.
+ //
+ dt_sendDmaDataFromTbe;
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DR_B_D, All_acks_and_owner_data, WB_O_W) {
+ //
+ // Note that the DMA consistency model allows us to send the DMA device
+ // a response as soon as we receive valid data and prior to receiving
+ // all acks. However, to simplify the protocol we wait for all acks.
+ //
+ dt_sendDmaDataFromTbe;
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DR_B_D, All_acks_and_shared_data, S) {
+ //
+ // Note that the DMA consistency model allows us to send the DMA device
+ // a response as soon as we receive valid data and prior to receiving
+ // all acks. However, to simplify the protocol we wait for all acks.
+ //
+ dt_sendDmaDataFromTbe;
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(O_DR_B, All_acks_and_owner_data, WB_O_W) {
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(O_DR_B, All_acks_and_data_no_sharers, WB_E_W) {
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ pfd_probeFilterDeallocate;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DR_B, All_acks_and_data_no_sharers, WB_E_W) {
+ //
+ // Note that the DMA consistency model allows us to send the DMA device
+ // a response as soon as we receive valid data and prior to receiving
+ // all acks. However, to simplify the protocol we wait for all acks.
+ //
+ dt_sendDmaDataFromTbe;
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ ppfd_possibleProbeFilterDeallocate;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DR_B_D, All_acks_and_data_no_sharers, WB_E_W) {
+ a_assertCacheData;
+ //
+ // Note that the DMA consistency model allows us to send the DMA device
+ // a response as soon as we receive valid data and prior to receiving
+ // all acks. However, to simplify the protocol we wait for all acks.
+ //
+ dt_sendDmaDataFromTbe;
+ ly_queueMemoryWriteFromTBE;
+ w_deallocateTBE;
+ ppfd_possibleProbeFilterDeallocate;
+ k_wakeUpDependents;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
+ ld_queueMemoryDmaWrite;
+ g_popTriggerQueue;
+ }
+
+ transition(NO_DW_W, Memory_Ack, E) {
+ da_sendDmaAck;
+ w_deallocateTBE;
+ ppfd_possibleProbeFilterDeallocate;
+ k_wakeUpDependents;
+ l_popMemQueue;
+ }
+
+ transition(O_B_W, Memory_Data, O_B) {
+ d_sendData;
+ w_deallocateTBE;
+ l_popMemQueue;
+ }
+
+ transition(NO_B_W, UnblockM, NO_W) {
+ uo_updateOwnerIfPf;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_B_W, UnblockS, NO_W) {
+ us_updateSharerIfFBD;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(O_B_W, UnblockS, O_W) {
+ us_updateSharerIfFBD;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_W, Memory_Data, NO) {
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ l_popMemQueue;
+ }
+
+ transition(O_W, Memory_Data, O) {
+ w_deallocateTBE;
+ k_wakeUpDependents;
+ l_popMemQueue;
+ }
+
+ // WB State Transitions
+ transition(WB, Writeback_Dirty, WB_O_W) {
+ rs_removeSharer;
+ l_queueMemoryWBRequest;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
+ rs_removeSharer;
+ l_queueMemoryWBRequest;
+ pfd_probeFilterDeallocate;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB_E_W, Memory_Ack, E) {
+ k_wakeUpDependents;
+ l_popMemQueue;
+ }
+
+ transition(WB_O_W, Memory_Ack, O) {
+ k_wakeUpDependents;
+ l_popMemQueue;
+ }
+
+ transition(WB, Writeback_Clean, O) {
+ ll_checkIncomingWriteback;
+ rs_removeSharer;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB, Writeback_Exclusive_Clean, E) {
+ ll_checkIncomingWriteback;
+ rs_removeSharer;
+ pfd_probeFilterDeallocate;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(WB, Unblock, NX) {
+ auno_assertUnblockerNotOwner;
+ k_wakeUpDependents;
+ j_popIncomingUnblockQueue;
+ }
+
+ transition(NO_F, PUTF, WB) {
+ a_sendWriteBackAck;
+ i_popIncomingRequestQueue;
+ }
+
+ // Possible race between GETF and UnblockM -- unclear whether this is still needed.
+ transition(NO_F, UnblockM) {
+ us_updateSharerIfFBD;
+ uo_updateOwnerIfPf;
+ j_popIncomingUnblockQueue;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+machine(MachineType:DMA, "DMA Controller")
+ : DMASequencer * dma_sequencer;
+ Cycles request_latency := 6;
+
+ MessageBuffer * responseFromDir, network="From", virtual_network="1",
+ vnet_type="response";
+ MessageBuffer * requestToDir, network="To", virtual_network="0",
+ vnet_type="request";
+ MessageBuffer * mandatoryQueue;
+{
+ state_declaration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, AccessPermission:Invalid, desc="Ready to accept a new request";
+ BUSY_RD, AccessPermission:Busy, desc="Busy: currently processing a read request";
+ BUSY_WR, AccessPermission:Busy, desc="Busy: currently processing a write request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ Ack, desc="DMA write to memory completed";
+ }
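+
+ // Request/response flow, as implemented by the transitions below: a
+ // ReadRequest takes READY -> BUSY_RD and completes on Data; a
+ // WriteRequest takes READY -> BUSY_WR and completes on Ack. Requests
+ // that arrive while busy are stalled and replayed on return to READY.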
+
+ structure(TBE, desc="...") {
+ State TBEState, desc="Transient state";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ structure(TBETable, external = "yes") {
+ TBE lookup(Addr);
+ void allocate(Addr);
+ void deallocate(Addr);
+ bool isPresent(Addr);
+ }
+
+ void set_tbe(TBE b);
+ void unset_tbe();
+ void wakeUpAllBuffers();
+
+ TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
+
+ Tick clockEdge();
+ MachineID mapAddressToMachine(Addr addr, MachineType mtype);
+
+ State getState(TBE tbe, Addr addr) {
+ if (is_valid(tbe)) {
+ return tbe.TBEState;
+ } else {
+ return State:READY;
+ }
+ }
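+
+ // The DMA controller keeps no permanent per-line state: the TBE alone
+ // encodes the transient state, and a line without a TBE is READY.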
+
+ void setState(TBE tbe, Addr addr, State state) {
+ if (is_valid(tbe)) {
+ tbe.TBEState := state;
+ }
+ }
+
+ AccessPermission getAccessPermission(Addr addr) {
+ return AccessPermission:NotPresent;
+ }
+
+ void setAccessPermission(Addr addr, State state) {
+ }
+
+ void functionalRead(Addr addr, Packet *pkt) {
+ error("DMA does not support functional read.");
+ }
+
+ int functionalWrite(Addr addr, Packet *pkt) {
+ error("DMA does not support functional write.");
+ }
+
+ out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
+ trigger(Event:ReadRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
+ peek(dmaResponseQueue_in, DMAResponseMsg) {
+ if (in_msg.Type == DMAResponseType:ACK) {
+ trigger(Event:Ack, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else if (in_msg.Type == DMAResponseType:DATA) {
+ trigger(Event:Data, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.LineAddress := in_msg.LineAddress;
+ out_msg.Type := DMARequestType:READ;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(requestToDir_out, DMARequestMsg, request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
+ out_msg.LineAddress := in_msg.LineAddress;
+ out_msg.Type := DMARequestType:WRITE;
+ out_msg.Requestor := machineID;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
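+
+ // Note: the quoted string is the action's shorthand in SLICC-generated
+ // tables; presumably "\s" keeps the 's' mnemonic for the write request
+ // while remaining distinct from s_sendReadRequest's plain "s".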
+
+ action(a_ackCallback, "a", desc="Notify the DMA sequencer that the write request completed") {
+ dma_sequencer.ackCallback(address);
+ }
+
+ action(d_dataCallback, "d", desc="Write data to dma sequencer") {
+ dma_sequencer.dataCallback(tbe.DataBlk, address);
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ assert(is_valid(tbe));
+ peek(dmaResponseQueue_in, DMAResponseMsg) {
+ tbe.DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ set_tbe(TBEs[address]);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ unset_tbe();
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue(clockEdge());
+ }
+
+ action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue(clockEdge());
+ }
+
+ action(zz_stallAndWaitRequestQueue, "zz", desc="...") {
+ stall_and_wait(dmaRequestQueue_in, address);
+ }
+
+ action(wkad_wakeUpAllDependents, "wkad", desc="wake-up all dependents") {
+ wakeUpAllBuffers();
+ }
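+
+ // zz_stallAndWaitRequestQueue parks requests that alias a busy line;
+ // wkad_wakeUpAllDependents replays them once the TBE is freed and the
+ // controller returns to READY.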
+
+ transition(READY, ReadRequest, BUSY_RD) {
+ v_allocateTBE;
+ s_sendReadRequest;
+ p_popRequestQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ v_allocateTBE;
+ s_sendWriteRequest;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Data, READY) {
+ t_updateTBEData;
+ d_dataCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition(BUSY_WR, Ack, READY) {
+ a_ackCallback;
+ w_deallocateTBE;
+ p_popResponseQueue;
+ wkad_wakeUpAllDependents;
+ }
+
+ transition({BUSY_RD,BUSY_WR}, {ReadRequest,WriteRequest}) {
+ zz_stallAndWaitRequestQueue;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * AMD's contributions to the MOESI hammer protocol do not constitute an
+ * endorsement of its similarity to any AMD products.
+ */
+
+// CoherenceRequestType
+enumeration(CoherenceRequestType, desc="...") {
+ GETX, desc="Get eXclusive";
+ GETS, desc="Get Shared";
+ MERGED_GETS, desc="Merged Get Shared requests";
+ PUT, desc="Put Ownership";
+ WB_ACK, desc="Writeback ack";
+ WB_NACK, desc="Writeback neg. ack";
+ PUTF, desc="PUT on a Flush";
+ GETF, desc="Issue exclusive for Flushing";
+ BLOCK_ACK, desc="Dir Block ack";
+ INV, desc="Invalidate";
+}
+
+// CoherenceResponseType
+enumeration(CoherenceResponseType, desc="...") {
+ ACK, desc="ACKnowledgment, responder does not have a copy";
+ ACK_SHARED, desc="ACKnowledgment, responder has a shared copy";
+ DATA, desc="Data, responder does not have a copy";
+ DATA_SHARED, desc="Data, responder has a shared copy";
+ DATA_EXCLUSIVE, desc="Data, responder was exclusive, gave us a copy, and they went to invalid";
+ WB_CLEAN, desc="Clean writeback";
+ WB_DIRTY, desc="Dirty writeback";
+ WB_EXCLUSIVE_CLEAN, desc="Clean writeback of exclusive data";
+ WB_EXCLUSIVE_DIRTY, desc="Dirty writeback of exclusive data";
+ UNBLOCK, desc="Unblock for writeback";
+ UNBLOCKS, desc="Unblock now in S";
+ UNBLOCKM, desc="Unblock now in M/O/E";
+ NULL, desc="Null value";
+}
+
+// TriggerType
+enumeration(TriggerType, desc="...") {
+ L2_to_L1, desc="L2 to L1 transfer";
+ ALL_ACKS, desc="See corresponding event";
+ ALL_ACKS_OWNER_EXISTS,desc="See corresponding event";
+ ALL_ACKS_NO_SHARERS, desc="See corresponding event";
+ ALL_UNBLOCKS, desc="All UnblockS messages received";
+}
+
+// TriggerMsg
+structure(TriggerMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ TriggerType Type, desc="Type of trigger";
+
+ bool functionalRead(Packet *pkt) {
+ // Trigger messages do not hold any data!
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // Trigger messages do not hold any data!
+ return false;
+ }
+}
+
+// RequestMsg (and also forwarded requests)
+structure(RequestMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest MergedRequestors, desc="Merge set of read requestors";
+ NetDest Destination, desc="Multicast destination mask";
+ MessageSizeType MessageSize, desc="size category of the message";
+ bool DirectedProbe, default="false", desc="probe filter directed probe";
+
+ Cycles InitialRequestTime, default="Cycles(0)",
+ desc="time the initial requests was sent from the L1Cache";
+ Cycles ForwardRequestTime, default="Cycles(0)",
+ desc="time the dir forwarded the request";
+ int SilentAcks, default="0", desc="silent acks from the full-bit directory";
+
+ bool functionalRead(Packet *pkt) {
+ // Request messages do not hold any data
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // Request messages do not hold any data
+ return false;
+ }
+}
+
+// ResponseMsg (and also unblock requests)
+structure(ResponseMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ CoherenceResponseType Type, desc="Type of response (Ack, Data, etc)";
+ MachineID Sender, desc="Node who sent the data";
+ MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
+ NetDest Destination, desc="Node to whom the data is sent";
+ DataBlock DataBlk, desc="data for the cache line";
+ bool Dirty, desc="Is the data dirty (different than memory)?";
+ int Acks, default="0", desc="How many messages this counts as";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ Cycles InitialRequestTime, default="Cycles(0)",
+ desc="time the initial requests was sent from the L1Cache";
+ Cycles ForwardRequestTime, default="Cycles(0)",
+ desc="time the dir forwarded the request";
+ int SilentAcks, default="0", desc="silent acks from the full-bit directory";
+
+ bool functionalRead(Packet *pkt) {
+ // The check below ensures that data is read only from messages that
+ // actually hold data.
+ if (Type == CoherenceResponseType:DATA ||
+ Type == CoherenceResponseType:DATA_SHARED ||
+ Type == CoherenceResponseType:DATA_EXCLUSIVE ||
+ Type == CoherenceResponseType:WB_DIRTY ||
+ Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ return false;
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ // Message type does not matter since all messages are written.
+ // If a protocol reads data from a packet that is not supposed
+ // to hold the data, then the fault lies with the protocol.
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
+
+enumeration(DMARequestType, desc="...", default="DMARequestType_NULL") {
+ READ, desc="Memory Read";
+ WRITE, desc="Memory Write";
+ NULL, desc="Invalid";
+}
+
+enumeration(DMAResponseType, desc="...", default="DMAResponseType_NULL") {
+ DATA, desc="DATA read";
+ ACK, desc="ACK write";
+ NULL, desc="Invalid";
+}
+
+structure(DMARequestMsg, desc="...", interface="Message") {
+ DMARequestType Type, desc="Request type (read/write)";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ Addr LineAddress, desc="Line address for this request";
+ MachineID Requestor, desc="Node who initiated the request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ int Len, desc="The length of the request";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ return testAndRead(LineAddress, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(LineAddress, DataBlk, pkt);
+ }
+}
+
+structure(DMAResponseMsg, desc="...", interface="Message") {
+ DMAResponseType Type, desc="Response type (DATA/ACK)";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ Addr LineAddress, desc="Line address for this request";
+ NetDest Destination, desc="Destination";
+ DataBlock DataBlk, desc="DataBlk attached to this request";
+ MessageSizeType MessageSize, desc="size category of the message";
+
+ bool functionalRead(Packet *pkt) {
+ return testAndRead(LineAddress, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(LineAddress, DataBlk, pkt);
+ }
+}
--- /dev/null
+protocol "MOESI_hammer";
+include "RubySlicc_interfaces.slicc";
+include "MOESI_hammer-msg.sm";
+include "MOESI_hammer-cache.sm";
+include "MOESI_hammer-dir.sm";
+include "MOESI_hammer-dma.sm";
--- /dev/null
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Mapping functions
+
+int machineCount(MachineType machType);
+MachineID mapAddressToRange(Addr addr, MachineType type,
+ int low, int high);
+MachineID mapAddressToRange(Addr addr, MachineType type,
+ int low, int high, NodeID n);
+NetDest broadcast(MachineType type);
+NodeID machineIDToNodeID(MachineID machID);
+NodeID machineIDToVersion(MachineID machID);
+MachineType machineIDToMachineType(MachineID machID);
+MachineID createMachineID(MachineType t, NodeID i);
--- /dev/null
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Hack: no node object declared here since the base class already has them
+NodeID version;
+MachineID machineID;
+NodeID clusterID;
+Cycles recycle_latency;
+
+// Functions implemented in the AbstractController class for
+// making timing access to the memory maintained by the
+// memory controllers.
+void queueMemoryRead(MachineID id, Addr addr, Cycles latency);
+void queueMemoryWrite(MachineID id, Addr addr, Cycles latency,
+ DataBlock block);
+void queueMemoryWritePartial(MachineID id, Addr addr, Cycles latency,
+ DataBlock block, int size);
+
+// Functions implemented in the AbstractController class for
+// making functional access to the memory maintained by the
+// memory controllers.
+void functionalMemoryRead(Packet *pkt);
+bool functionalMemoryWrite(Packet *pkt);
--- /dev/null
+/*
+ * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
+ * Copyright (c) 2011 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Declarations of external types that are common to all protocols
+external_type(int, primitive="yes", default="0");
+external_type(bool, primitive="yes", default="false");
+external_type(std::string, primitive="yes");
+external_type(uint32_t, primitive="yes");
+external_type(uint64_t, primitive="yes");
+external_type(PacketPtr, primitive="yes");
+external_type(Packet, primitive="yes");
+external_type(Addr, primitive="yes");
+external_type(Cycles, primitive="yes", default="Cycles(0)");
+external_type(Tick, primitive="yes", default="0");
+
+structure(WriteMask, external="yes", desc="...") {
+ void clear();
+ bool cmpMask(WriteMask);
+ bool isEmpty();
+ bool isFull();
+ bool isOverlap(WriteMask);
+ void orMask(WriteMask);
+ void fillMask();
+}
+
+structure(DataBlock, external = "yes", desc="...") {
+ void clear();
+ void copyPartial(DataBlock, int, int);
+ void copyPartial(DataBlock, WriteMask);
+ void atomicPartial(DataBlock, WriteMask);
+}
+
+bool testAndRead(Addr addr, DataBlock datablk, Packet *pkt);
+bool testAndReadMask(Addr addr, DataBlock datablk, WriteMask mask, Packet *pkt);
+bool testAndWrite(Addr addr, DataBlock datablk, Packet *pkt);
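+
+// Loosely, testAndRead copies data from datablk into pkt and testAndWrite
+// copies data from pkt into datablk when the packet's range falls within
+// the block at addr; each returns whether the access was satisfied.
+// (A sketch of the semantics, not an authoritative specification.)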
+
+// AccessPermission
+// The following five states define the access permission of all memory blocks.
+// These permissions have multiple uses. They coordinate locking and
+// synchronization primitives, as well as enable functional accesses.
+// One should not need to add any additional permission values and it is very
+// risky to do so.
+enumeration(AccessPermission, desc="...", default="AccessPermission_NotPresent") {
+ // Valid data
+ Read_Only, desc="block is Read Only (modulo functional writes)";
+ Read_Write, desc="block is Read/Write";
+
+ // Possibly Invalid data
+ // The Maybe_Stale permission indicates that, according to the protocol,
+ // there is no guarantee the block contains valid data. However, functional
+ // writes should update the block because a dataless PUT request may
+ // revalidate the block's data.
+ Maybe_Stale, desc="block can be stale or revalidated by a dataless PUT";
+ // In Broadcast/Snoop protocols, memory has no idea whether it is the
+ // exclusive owner of a block, which makes it impossible to enforce the
+ // invariant that only one read_write copy exists in the system. This
+ // permission lets the memory say, "I have the block," and lets the
+ // RubyPort logic treat it as a last-resort copy when there are no
+ // writable copies in the caching hierarchy. It is not meant for directory
+ // or token protocols, where memory/NB knows what is going on in the
+ // whole system.
+ Backing_Store, desc="for memory in Broadcast/Snoop protocols";
+
+ // Invalid data
+ Invalid, desc="block is in an Invalid base state";
+ NotPresent, desc="block is NotPresent";
+ Busy, desc="block is in a transient state, currently invalid";
+}
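+
+// For example, the DMA machine earlier in this patch tags READY with
+// AccessPermission:Invalid and BUSY_RD/BUSY_WR with AccessPermission:Busy
+// in its state_declaration.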
+
+// HSA scopes
+enumeration(HSAScope, desc="...", default="HSAScope_UNSPECIFIED") {
+ UNSPECIFIED, desc="Unspecified scope";
+ NOSCOPE, desc="Explicitly unscoped";
+ WAVEFRONT, desc="Wavefront scope";
+ WORKGROUP, desc="Workgroup scope";
+ DEVICE, desc="Device scope";
+ SYSTEM, desc="System scope";
+}
+
+// HSA segment types
+enumeration(HSASegment, desc="...", default="HSASegment_GLOBAL") {
+ GLOBAL, desc="Global segment";
+ GROUP, desc="Group segment";
+ PRIVATE, desc="Private segment";
+ KERNARG, desc="Kernarg segment";
+ READONLY, desc="Readonly segment";
+ SPILL, desc="Spill segment";
+ ARG, desc="Arg segment";
+}
+
+// TesterStatus
+enumeration(TesterStatus, desc="...") {
+ Idle, desc="Idle";
+ Action_Pending, desc="Action Pending";
+ Ready, desc="Ready";
+ Check_Pending, desc="Check Pending";
+}
+
+// InvalidateGeneratorStatus
+enumeration(InvalidateGeneratorStatus, desc="...") {
+ Load_Waiting, desc="Load waiting to be issued";
+ Load_Pending, desc="Load issued";
+ Inv_Waiting, desc="Store (invalidate) waiting to be issued";
+ Inv_Pending, desc="Store (invalidate) issued";
+}
+
+// SeriesRequestGeneratorStatus
+enumeration(SeriesRequestGeneratorStatus, desc="...") {
+ Thinking, desc="Doing work before next action";
+ Request_Pending, desc="Request pending";
+}
+
+// LockStatus
+enumeration(LockStatus, desc="...") {
+ Unlocked, desc="Lock is not held";
+ Locked, desc="Lock is held";
+}
+
+// SequencerStatus
+enumeration(SequencerStatus, desc="...") {
+ Idle, desc="Idle";
+ Pending, desc="Pending";
+}
+
+enumeration(TransitionResult, desc="...") {
+ Valid, desc="Valid transition";
+ ResourceStall, desc="Stalled due to insufficient resources";
+ ProtocolStall, desc="Protocol specified stall";
+ Reject, desc="Rejected because of a type mismatch";
+}
+
+// RubyRequestType
+enumeration(RubyRequestType, desc="...", default="RubyRequestType_NULL") {
+ LD, desc="Load";
+ ST, desc="Store";
+ ATOMIC, desc="Atomic Load/Store -- depricated. use ATOMIC_RETURN or ATOMIC_NO_RETURN";
+ ATOMIC_RETURN, desc="Atomic Load/Store, return data";
+ ATOMIC_NO_RETURN, desc="Atomic Load/Store, do not return data";
+ IFETCH, desc="Instruction fetch";
+ IO, desc="I/O";
+ REPLACEMENT, desc="Replacement";
+ Load_Linked, desc="";
+ Store_Conditional, desc="";
+ RMW_Read, desc="";
+ RMW_Write, desc="";
+ Locked_RMW_Read, desc="";
+ Locked_RMW_Write, desc="";
+ COMMIT, desc="Commit version";
+ NULL, desc="Invalid request type";
+ FLUSH, desc="Flush request type";
+ Release, desc="Release operation";
+ Acquire, desc="Acquire operation";
+ AcquireRelease, desc="Acquire and Release operation";
+}
+
+enumeration(SequencerRequestType, desc="...", default="SequencerRequestType_NULL") {
+ Default, desc="Replace this with access_types passed to the DMA Ruby object";
+ LD, desc="Load";
+ ST, desc="Store";
+ ATOMIC, desc="Atomic Load/Store";
+ REPLACEMENT, desc="Replacement";
+ FLUSH, desc="Flush request type";
+ NULL, desc="Invalid request type";
+}
+
+enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
+ DataArrayRead, desc="Read access to the cache's data array";
+ DataArrayWrite, desc="Write access to the cache's data array";
+ TagArrayRead, desc="Read access to the cache's tag array";
+ TagArrayWrite, desc="Write access to the cache's tag array";
+}
+
+enumeration(CacheResourceType, desc="...", default="CacheResourceType_NULL") {
+ DataArray, desc="Access to the cache's data array";
+ TagArray, desc="Access to the cache's tag array";
+}
+
+enumeration(DirectoryRequestType, desc="...", default="DirectoryRequestType_NULL") {
+ Default, desc="Replace this with access_types passed to the Directory Ruby object";
+}
+
+enumeration(DMASequencerRequestType, desc="...", default="DMASequencerRequestType_NULL") {
+ Default, desc="Replace this with access_types passed to the DMA Ruby object";
+}
+
+enumeration(MemoryControlRequestType, desc="...", default="MemoryControlRequestType_NULL") {
+ Default, desc="Replace this with access_types passed to the DMA Ruby object";
+}
+
+
+// These are the statically defined types of state machines that we can
+// have. If you want to add a new machine type, edit this enum. A protocol
+// need not define state machines for all the types here, but it cannot
+// use anything other than the ones defined here. Also, a protocol can
+// have only one state machine of a given type.
+enumeration(MachineType, desc="...", default="MachineType_NULL") {
+ L0Cache, desc="L0 Cache Mach";
+ L1Cache, desc="L1 Cache Mach";
+ L2Cache, desc="L2 Cache Mach";
+ L3Cache, desc="L3 Cache Mach";
+ Directory, desc="Directory Mach";
+ DMA, desc="DMA Mach";
+ Collector, desc="Collector Mach";
+ L1Cache_wCC, desc="L1 Cache Mach to track cache-to-cache transfer (used for miss latency profile)";
+ L2Cache_wCC, desc="L2 Cache Mach to track cache-to-cache transfer (used for miss latency profile)";
+ CorePair, desc="Cache Mach (2 cores, Private L1Ds, Shared L1I & L2)";
+ TCP, desc="GPU L1 Data Cache (Texture Cache per Pipe)";
+ TCC, desc="GPU L2 Shared Cache (Texture Cache per Channel)";
+ TCCdir, desc="Directory at the GPU L2 Cache (TCC)";
+ SQC, desc="GPU L1 Instr Cache (Sequencer Cache)";
+ RegionDir, desc="Region-granular directory";
+ RegionBuffer,desc="Region buffer for CPU and GPU";
+ NULL, desc="null mach type";
+}
+
+// MessageSizeType
+enumeration(MessageSizeType, desc="...") {
+ Control, desc="Control Message";
+ Data, desc="Data Message";
+ Request_Control, desc="Request";
+ Reissue_Control, desc="Reissued request";
+ Response_Data, desc="data response";
+ ResponseL2hit_Data, desc="data response";
+ ResponseLocal_Data, desc="data response";
+ Response_Control, desc="non-data response";
+ Writeback_Data, desc="Writeback data";
+ Writeback_Control, desc="Writeback control";
+ Broadcast_Control, desc="Broadcast control";
+ Multicast_Control, desc="Multicast control";
+ Forwarded_Control, desc="Forwarded control";
+ Invalidate_Control, desc="Invalidate control";
+ Unblock_Control, desc="Unblock control";
+ Persistent_Control, desc="Persistent request activation messages";
+ Completion_Control, desc="Completion messages";
+}
+
+// AccessType
+enumeration(AccessType, desc="...") {
+ Read, desc="Reading from cache";
+ Write, desc="Writing to cache";
+}
+
+// RubyAccessMode
+enumeration(RubyAccessMode, default="RubyAccessMode_User", desc="...") {
+ Supervisor, desc="Supervisor mode";
+ User, desc="User mode";
+ Device, desc="Device mode";
+}
+
+enumeration(PrefetchBit, default="PrefetchBit_No", desc="...") {
+ No, desc="No, not a prefetch";
+ Yes, desc="Yes, a prefetch";
+ L1_HW, desc="This is a L1 hardware prefetch";
+ L2_HW, desc="This is a L2 hardware prefetch";
+}
+
+// CacheMsg
+structure(SequencerMsg, desc="...", interface="Message") {
+ Addr LineAddress, desc="Line address for this request";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ SequencerRequestType Type, desc="Type of request (LD, ST, etc)";
+ Addr ProgramCounter, desc="Program counter of the instruction that caused the miss";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ DataBlock DataBlk, desc="Data";
+ int Len, desc="size in bytes of access";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+ MessageSizeType MessageSize, default="MessageSizeType_Request_Control";
+
+ bool functionalRead(Packet *pkt) {
+ return testAndRead(PhysicalAddress, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(PhysicalAddress, DataBlk, pkt);
+ }
+}
+
+// MaskPredictorType
+enumeration(MaskPredictorType, "MaskPredictorType_Undefined", desc="...") {
+ Undefined, desc="Undefined";
+ AlwaysUnicast, desc="AlwaysUnicast";
+ TokenD, desc="TokenD";
+ AlwaysBroadcast, desc="AlwaysBroadcast";
+ TokenB, desc="TokenB";
+ TokenNull, desc="TokenNull";
+ Random, desc="Random";
+ Pairwise, desc="Pairwise";
+ Owner, desc="Owner";
+ BroadcastIfShared, desc="Broadcast-If-Shared";
+ BroadcastCounter, desc="Broadcast Counter";
+ Group, desc="Group";
+ Counter, desc="Counter";
+ StickySpatial, desc="StickySpatial";
+ OwnerBroadcast, desc="Owner/Broadcast Hybrid";
+ OwnerGroup, desc="Owner/Group Hybrid";
+ OwnerBroadcastMod, desc="Owner/Broadcast Hybrid-Mod";
+ OwnerGroupMod, desc="Owner/Group Hybrid-Mod";
+ LastNMasks, desc="Last N Masks";
+ BandwidthAdaptive, desc="Bandwidth Adaptive";
+}
+
+// MaskPredictorIndex
+enumeration(MaskPredictorIndex, "MaskPredictorIndex_Undefined", desc="...") {
+ Undefined, desc="Undefined";
+ DataBlock, desc="Data Block";
+ PC, desc="Program Counter";
+}
+
+// MaskPredictorTraining
+enumeration(MaskPredictorTraining, "MaskPredictorTraining_Undefined", desc="...") {
+ Undefined, desc="Undefined";
+ None, desc="None";
+ Implicit, desc="Implicit";
+ Explicit, desc="Explicit";
+ Both, desc="Both";
+}
+
+// Request Status
+enumeration(RequestStatus, desc="...", default="RequestStatus_NULL") {
+ Ready, desc="The sequencer is ready and the request does not alias";
+ Issued, desc="The sequencer successfully issued the request";
+ BufferFull, desc="Can not issue because the sequencer is full";
+ Aliased, desc="This request aliased with a currently outstanding request";
+ NULL, desc="";
+}
+
+// LinkDirection
+enumeration(LinkDirection, desc="...") {
+ In, desc="Inward link direction";
+ Out, desc="Outward link direction";
+}
--- /dev/null
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// MemoryRequestType used in MemoryMsg
+
+enumeration(MemoryRequestType, desc="...") {
+
+ // Southbound request: from directory to memory cache
+ // or directory to memory or memory cache to memory
+ MEMORY_READ, desc="Read request to memory";
+ MEMORY_WB, desc="Write back data to memory";
+
+ // response from memory to directory
+ // (These are currently unused!)
+ MEMORY_DATA, desc="Data read from memory";
+ MEMORY_ACK, desc="Write to memory acknowledgement";
+}
+
+
+// Message to and from Memory Control
+
+structure(MemoryMsg, desc="...", interface="Message") {
+ Addr addr, desc="Physical address for this request";
+ MemoryRequestType Type, desc="Type of memory request (MEMORY_READ or MEMORY_WB)";
+ MachineID Sender, desc="What component sent the data";
+ MachineID OriginalRequestorMachId, desc="What component originally requested";
+ DataBlock DataBlk, desc="Data to writeback";
+ MessageSizeType MessageSize, desc="size category of the message";
+ // Not all fields used by all protocols:
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+ bool ReadX, desc="Exclusive";
+ int Acks, desc="How many acks to expect";
+
+ bool functionalRead(Packet *pkt) {
+ return testAndRead(addr, DataBlk, pkt);
+ }
+
+ bool functionalWrite(Packet *pkt) {
+ return testAndWrite(addr, DataBlk, pkt);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 2013 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// External Types
+
+//
+// **PLEASE NOTE!** When adding objects to this file you must also add a line
+// in the src/mem/ruby/SConscript file. Otherwise the external object's .hh
+// file will not be copied to the protocol directory and you will encounter
+// an undefined declaration error.
+//
+
+external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
+external_type(OutPort, primitive="yes");
+external_type(Scalar, primitive="yes");
+
+structure(InPort, external = "yes", primitive="yes") {
+ bool isReady(Tick current_time);
+ Tick dequeue(Tick current_time);
+ void recycle(Tick current_time, Tick recycle_latency);
+ bool isEmpty();
+ bool isStallMapEmpty();
+ int getStallMapSize();
+}
+
+external_type(NodeID, default="0", primitive="yes");
+external_type(MachineID);
+
+structure (Set, external = "yes", non_obj="yes") {
+ void setSize(int);
+ void add(NodeID);
+ void addSet(Set);
+ void remove(NodeID);
+ void removeSet(Set);
+ void broadcast();
+ void addRandom();
+ void clear();
+ int count();
+ bool isElement(NodeID);
+ bool isEqual(Set);
+ bool isSuperset(Set);
+ bool intersectionIsEmpty(Set);
+ NodeID smallestElement();
+}
+
+structure (NetDest, external = "yes", non_obj="yes") {
+ void setSize(int);
+ void setSize(int, int);
+ void add(NodeID);
+ void add(MachineID);
+ void addSet(Set);
+ void addNetDest(NetDest);
+ void setNetDest(MachineType, Set);
+ void remove(NodeID);
+ void remove(MachineID);
+ void removeSet(Set);
+ void removeNetDest(NetDest);
+ void broadcast();
+ void broadcast(MachineType);
+ void addRandom();
+ void clear();
+ Set toSet();
+ int count();
+ bool isElement(NodeID);
+ bool isElement(MachineID);
+ bool isSuperset(Set);
+ bool isSuperset(NetDest);
+ bool isEmpty();
+ bool intersectionIsEmpty(Set);
+ bool intersectionIsEmpty(NetDest);
+ MachineID smallestElement(MachineType);
+ NetDest OR(NetDest);
+ NetDest AND(NetDest);
+}
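+
+// Roughly: Set is a flat bitvector of NodeIDs, while NetDest is a
+// destination mask keyed by (MachineType, node); message Destination
+// fields are NetDests.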
+
+structure (Sequencer, external = "yes") {
+ void readCallback(Addr, DataBlock);
+ void readCallback(Addr, DataBlock, bool);
+ void readCallback(Addr, DataBlock, bool, MachineType);
+ void readCallback(Addr, DataBlock, bool, MachineType,
+ Cycles, Cycles, Cycles);
+
+ void writeCallback(Addr, DataBlock);
+ void writeCallback(Addr, DataBlock, bool);
+ void writeCallback(Addr, DataBlock, bool, MachineType);
+ void writeCallback(Addr, DataBlock, bool, MachineType,
+ Cycles, Cycles, Cycles);
+
+ void checkCoherence(Addr);
+ void evictionCallback(Addr);
+ void recordRequestType(SequencerRequestType);
+ bool checkResourceAvailable(CacheResourceType, Addr);
+ void invalidateSC(Addr);
+}
+
+structure (GPUCoalescer, external = "yes") {
+ void readCallback(Addr, DataBlock);
+ void readCallback(Addr, MachineType, DataBlock);
+ void readCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles);
+ void readCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles, bool);
+ void writeCallback(Addr, DataBlock);
+ void writeCallback(Addr, MachineType, DataBlock);
+ void writeCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles);
+ void writeCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles, bool);
+ void checkCoherence(Addr);
+ void evictionCallback(Addr);
+ void recordCPReadCallBack(MachineID, MachineID);
+ void recordCPWriteCallBack(MachineID, MachineID);
+}
+
+structure (VIPERCoalescer, external = "yes") {
+ void readCallback(Addr, DataBlock);
+ void readCallback(Addr, MachineType, DataBlock);
+ void readCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles);
+ void readCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles, bool);
+ void writeCallback(Addr, DataBlock);
+ void writeCallback(Addr, MachineType, DataBlock);
+ void writeCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles);
+ void writeCallback(Addr, MachineType, DataBlock,
+ Cycles, Cycles, Cycles, bool);
+ void invCallback(Addr);
+ void wbCallback(Addr);
+ void checkCoherence(Addr);
+ void evictionCallback(Addr);
+}
+
+structure(RubyRequest, desc="...", interface="Message", external="yes") {
+ Addr LineAddress, desc="Line address for this request";
+ Addr PhysicalAddress, desc="Physical address for this request";
+ RubyRequestType Type, desc="Type of request (LD, ST, etc)";
+ Addr ProgramCounter, desc="Program counter of the instruction that caused the miss";
+ RubyAccessMode AccessMode, desc="user/supervisor access type";
+ int Size, desc="size in bytes of access";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+ int contextId, desc="this goes away but must be replaced with Nilay";
+ WriteMask writeMask, desc="Writethrough mask";
+ DataBlock WTData, desc="Writethrough data block";
+ int wfid, desc="Writethrough wavefront";
+ HSAScope scope, desc="HSA scope";
+ HSASegment segment, desc="HSA segment";
+ PacketPtr pkt, desc="Packet associated with this request";
+}
+
+structure(AbstractEntry, primitive="yes", external = "yes") {
+ void changePermission(AccessPermission);
+}
+
+structure (DirectoryMemory, external = "yes") {
+ AbstractEntry allocate(Addr, AbstractEntry);
+ AbstractEntry lookup(Addr);
+ bool isPresent(Addr);
+ void invalidateBlock(Addr);
+ void recordRequestType(DirectoryRequestType);
+}
+
+structure(AbstractCacheEntry, primitive="yes", external = "yes") {
+ void changePermission(AccessPermission);
+}
+
+structure (CacheMemory, external = "yes") {
+ bool cacheAvail(Addr);
+ Addr cacheProbe(Addr);
+ AbstractCacheEntry allocate(Addr, AbstractCacheEntry);
+ AbstractCacheEntry allocate(Addr, AbstractCacheEntry, bool);
+ void allocateVoid(Addr, AbstractCacheEntry);
+ void deallocate(Addr);
+ AbstractCacheEntry lookup(Addr);
+ bool isTagPresent(Addr);
+ Cycles getTagLatency();
+ Cycles getDataLatency();
+ void setMRU(Addr);
+ void setMRU(Addr, int);
+ void setMRU(AbstractCacheEntry);
+ void recordRequestType(CacheRequestType, Addr);
+ bool checkResourceAvailable(CacheResourceType, Addr);
+
+ int getCacheSize();
+ int getNumBlocks();
+ Addr getAddressAtIdx(int);
+
+ Scalar demand_misses;
+ Scalar demand_hits;
+}
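+
+// demand_misses and demand_hits are statistics Scalars exposed to SLICC,
+// presumably so protocols can update the cache's demand hit/miss counts
+// directly.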
+
+structure (WireBuffer, inport="yes", outport="yes", external = "yes") {
+
+}
+
+structure (DMASequencer, external = "yes") {
+ void ackCallback(Addr);
+ void dataCallback(DataBlock,Addr);
+ void recordRequestType(CacheRequestType);
+}
+
+structure (TimerTable, inport="yes", external = "yes") {
+ bool isReady(Tick);
+ Addr nextAddress();
+ void set(Addr, Tick);
+ void unset(Addr);
+ bool isSet(Addr);
+}
+
+structure (AbstractBloomFilter, external = "yes") {
+ void clear(int);
+ void set(Addr, int);
+ void unset(Addr, int);
+
+ bool isSet(Addr, int);
+ int getCount(Addr, int);
+}
+
+structure (Prefetcher, external = "yes") {
+ void observeMiss(Addr, RubyRequestType);
+ void observePfHit(Addr);
+ void observePfMiss(Addr);
+}
--- /dev/null
+
+/*
+ * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Miscellaneous Functions
+
+void error(std::string msg);
+void assert(bool condition);
+int random(int number);
+Cycles zero_time();
+NodeID intToID(int nodenum);
+int IDToInt(NodeID id);
+int addressToInt(Addr addr);
+Addr intToAddress(int addr);
+void procProfileCoherenceRequest(NodeID node, bool needCLB);
+void dirProfileCoherenceRequest(NodeID node, bool needCLB);
+int max_tokens();
+Addr setOffset(Addr addr, int offset);
+Addr makeLineAddress(Addr addr);
+int getOffset(Addr addr);
+int mod(int val, int mod);
+Addr bitSelect(Addr addr, int small, int big);
+Addr maskLowOrderBits(Addr addr, int number);
+Addr makeNextStrideAddress(Addr addr, int stride);
+structure(BoolVec, external="yes") {
+}
+int countBoolVec(BoolVec bVec);
--- /dev/null
+include "RubySlicc_Exports.sm";
+include "RubySlicc_Types.sm";
+include "RubySlicc_Util.sm";
+include "RubySlicc_ComponentMapping.sm";
+include "RubySlicc_Defines.sm";
+include "RubySlicc_MemControl.sm";
--- /dev/null
+# -*- mode:python -*-
+
+# Copyright (c) 2009 The Hewlett-Packard Development Company
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+import os
+import re
+import sys
+
+from os.path import isdir, isfile, join as joinpath
+
+from SCons.Scanner import Classic
+
+from gem5_scons import Transform
+
+Import('*')
+
+if env['PROTOCOL'] == 'None':
+ Return()
+
+output_dir = Dir('.')
+html_dir = Dir('html')
+slicc_dir = Dir('../slicc')
+
+sys.path[1:1] = [ Dir('..').Dir('..').srcnode().abspath ]
+from slicc.parser import SLICC
+
+slicc_depends = []
+for root, dirs, files in os.walk(slicc_dir.srcnode().abspath):
+ for f in files:
+ if f.endswith('.py'):
+ slicc_depends.append(File(joinpath(root, f)))
+
+#
+# Use SLICC
+#
+env["SLICC_PATH"] = protocol_dirs
+slicc_scanner = Classic("SliccScanner", ['.sm', '.slicc'], "SLICC_PATH",
+ r'''include[ \t]["'](.*)["'];''')
+env.Append(SCANNERS=slicc_scanner)
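+
+# The scanner picks up dependencies written in .sm/.slicc files as, e.g.:
+#   include "MOESI_hammer-msg.sm";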
+
+def slicc_emitter(target, source, env):
+ assert len(source) == 1
+ filepath = source[0].srcnode().abspath
+
+ slicc = SLICC(filepath, protocol_base.abspath, verbose=False)
+ slicc.process()
+ slicc.writeCodeFiles(output_dir.abspath, slicc_includes)
+ if env['SLICC_HTML']:
+ slicc.writeHTMLFiles(html_dir.abspath)
+
+ target.extend([output_dir.File(f) for f in sorted(slicc.files())])
+ return target, source
+
+def slicc_action(target, source, env):
+ assert len(source) == 1
+ filepath = source[0].srcnode().abspath
+
+ slicc = SLICC(filepath, protocol_base.abspath, verbose=True)
+ slicc.process()
+ slicc.writeCodeFiles(output_dir.abspath, slicc_includes)
+ if env['SLICC_HTML']:
+ slicc.writeHTMLFiles(html_dir.abspath)
+
+slicc_builder = Builder(action=MakeAction(slicc_action, Transform("SLICC")),
+ emitter=slicc_emitter)
+
+protocol = env['PROTOCOL']
+protocol_dir = None
+for path in protocol_dirs:
+ if os.path.exists(os.path.join(path, "%s.slicc" % protocol)):
+ protocol_dir = Dir(path)
+ break
+
+if not protocol_dir:
+ raise ValueError("Could not find %s.slicc in protocol_dirs" % protocol)
+
+sources = [ protocol_dir.File("%s.slicc" % protocol) ]
+
+env.Append(BUILDERS={'SLICC' : slicc_builder})
+nodes = env.SLICC([], sources)
+env.Depends(nodes, slicc_depends)
+
+for f in nodes:
+ s = str(f)
+ if s.endswith('.cc'):
+ Source(f)
+ elif s.endswith('.py'):
+ SimObject(f)
+
--- /dev/null
+# -*- mode:python -*-
+
+# Copyright (c) 2009 The Hewlett-Packard Development Company
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+import os
+
+Import('*')
+
+all_protocols.extend([
+    'GPU_VIPER',
+    'GPU_VIPER_Baseline',
+    'GPU_VIPER_Region',
+    'GPU_RfO',
+    'MOESI_AMD_Base',
+    'MESI_Two_Level',
+    'MESI_Three_Level',
+    'MI_example',
+    'MOESI_CMP_directory',
+    'MOESI_CMP_token',
+    'MOESI_hammer',
+    'Garnet_standalone',
+    'None'
+    ])
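This list enumerates the legal values of the sticky PROTOCOL build variable; apart from 'None', which makes the SConscript above bail out early, each entry must have a matching <name>.slicc file in one of the registered protocol directories. A hypothetical downstream check (not part of this patch) could look like:

if env['PROTOCOL'] not in all_protocols:
    raise ValueError("Unknown PROTOCOL '%s'" % env['PROTOCOL'])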
+
+opt = BoolVariable('SLICC_HTML', 'Create HTML files', False)
+sticky_vars.AddVariables(opt)
+
+protocol_dirs.append(Dir('.').abspath)
+
+protocol_base = Dir('.')
+Export('protocol_base')
+
+slicc_includes.append('mem/ruby/slicc_interface/RubySlicc_includes.hh')
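Everything below is the mechanical half of the move: each #include of a "mem/protocol/..." header becomes "mem/ruby/protocol/...", and the surrounding include blocks are re-sorted because the new prefix changes their alphabetical order. A rewrite of this shape is scriptable; the helper here is only a hypothetical illustration, not the tool actually used for this patch:

import os
import re

OLD_INCLUDE = re.compile(r'(#include\s*")mem/protocol/')

def rewrite_includes(root):
    # Walk the tree and rewrite the include prefix in C++ sources.
    for dirpath, _, names in os.walk(root):
        for name in names:
            if not name.endswith(('.hh', '.cc')):
                continue
            path = os.path.join(dirpath, name)
            with open(path) as f:
                text = f.read()
            fixed = OLD_INCLUDE.sub(r'\1mem/ruby/protocol/', text)
            if fixed != text:
                with open(path, 'w') as f:
                    f.write(fixed)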
#include <iostream>
#include "base/logging.hh"
-#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
class DataBlock;
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "debug/RubyQueue.hh"
-#include "mem/protocol/MemoryMsg.hh"
#include "mem/ruby/network/Network.hh"
+#include "mem/ruby/protocol/MemoryMsg.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "base/addr_range.hh"
#include "base/callback.hh"
#include "mem/packet.hh"
-#include "mem/protocol/AccessPermission.hh"
#include "mem/qport.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
#include "mem/ruby/common/Histogram.hh"
#include "mem/ruby/common/MachineID.hh"
#include "mem/ruby/network/MessageBuffer.hh"
+#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/system/CacheRecorder.hh"
#include "params/RubyController.hh"
#include "sim/clocked_object.hh"
#include <iostream>
-#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/protocol/AccessPermission.hh"
class AbstractEntry
{
#include <stack>
#include "mem/packet.hh"
-#include "mem/protocol/MessageSizeType.hh"
#include "mem/ruby/common/NetDest.hh"
+#include "mem/ruby/protocol/MessageSizeType.hh"
class Message;
typedef std::shared_ptr<Message> MsgPtr;
#include <ostream>
#include <vector>
-#include "mem/protocol/HSAScope.hh"
-#include "mem/protocol/HSASegment.hh"
-#include "mem/protocol/Message.hh"
-#include "mem/protocol/PrefetchBit.hh"
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/DataBlock.hh"
#include "mem/ruby/common/WriteMask.hh"
+#include "mem/ruby/protocol/HSAScope.hh"
+#include "mem/ruby/protocol/HSASegment.hh"
+#include "mem/ruby/protocol/Message.hh"
+#include "mem/ruby/protocol/PrefetchBit.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
class RubyRequest : public Message
{
#ifndef __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_COMPONENTMAPPINGS_HH__
#define __MEM_RUBY_SLICC_INTERFACE_RUBYSLICC_COMPONENTMAPPINGS_HH__
-#include "mem/protocol/MachineType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/MachineID.hh"
#include "mem/ruby/common/NetDest.hh"
+#include "mem/ruby/protocol/MachineType.hh"
#include "mem/ruby/structures/DirectoryMemory.hh"
inline NetDest
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
-#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/ruby/system/WeightedLRUPolicy.hh"
#include <vector>
#include "base/statistics.hh"
-#include "mem/protocol/CacheRequestType.hh"
-#include "mem/protocol/CacheResourceType.hh"
-#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/common/DataBlock.hh"
+#include "mem/ruby/protocol/CacheRequestType.hh"
+#include "mem/ruby/protocol/CacheResourceType.hh"
+#include "mem/ruby/protocol/RubyRequest.hh"
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
#include "mem/ruby/structures/AbstractReplacementPolicy.hh"
#include <string>
#include "base/addr_range.hh"
-#include "mem/protocol/DirectoryRequestType.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/protocol/DirectoryRequestType.hh"
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
#include "params/RubyDirectoryMemory.hh"
#include "sim/sim_object.hh"
#include <unordered_map>
-#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/protocol/AccessPermission.hh"
template<class ENTRY>
struct PerfectCacheLineState
#include <iostream>
#include <unordered_map>
-#include "mem/protocol/AccessType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/MachineID.hh"
#include "mem/ruby/common/NetDest.hh"
+#include "mem/ruby/protocol/AccessType.hh"
class PersistentTableEntry
{
#include <vector>
#include "base/types.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/DataBlock.hh"
#include "mem/ruby/common/TypeDefines.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
class Sequencer;
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
-#include "mem/protocol/SequencerMsg.hh"
-#include "mem/protocol/SequencerRequestType.hh"
+#include "mem/ruby/protocol/SequencerMsg.hh"
+#include "mem/ruby/protocol/SequencerRequestType.hh"
#include "mem/ruby/system/RubySystem.hh"
DMARequest::DMARequest(uint64_t start_paddr, int len, bool write,
#include <ostream>
#include <unordered_map>
-#include "mem/protocol/DMASequencerRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/DataBlock.hh"
+#include "mem/ruby/protocol/DMASequencerRequestType.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "params/DMASequencer.hh"
#include <unordered_map>
#include "base/statistics.hh"
-#include "mem/protocol/HSAScope.hh"
-#include "mem/protocol/HSASegment.hh"
-#include "mem/protocol/PrefetchBit.hh"
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/RubyRequestType.hh"
-#include "mem/protocol/SequencerRequestType.hh"
#include "mem/request.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/protocol/HSAScope.hh"
+#include "mem/ruby/protocol/HSASegment.hh"
+#include "mem/ruby/protocol/PrefetchBit.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
+#include "mem/ruby/protocol/SequencerRequestType.hh"
#include "mem/ruby/system/Sequencer.hh"
class DataBlock;
#include "debug/Config.hh"
#include "debug/Drain.hh"
#include "debug/Ruby.hh"
-#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/simple_mem.hh"
#include "sim/full_system.hh"
#include <cassert>
#include <string>
-#include "mem/protocol/RequestStatus.hh"
#include "mem/ruby/common/MachineID.hh"
#include "mem/ruby/network/MessageBuffer.hh"
+#include "mem/ruby/protocol/RequestStatus.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "mem/tport.hh"
#include "params/RubyPort.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
-#include "mem/protocol/PrefetchBit.hh"
-#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/protocol/PrefetchBit.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"
#include <iostream>
#include <unordered_map>
-#include "mem/protocol/MachineType.hh"
-#include "mem/protocol/RubyRequestType.hh"
-#include "mem/protocol/SequencerRequestType.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/protocol/MachineType.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
+#include "mem/ruby/protocol/SequencerRequestType.hh"
#include "mem/ruby/structures/CacheMemory.hh"
#include "mem/ruby/system/RubyPort.hh"
#include "params/RubySequencer.hh"
#include <iostream>
-#include "mem/protocol/PrefetchBit.hh"
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/RubyRequestType.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/protocol/PrefetchBit.hh"
+#include "mem/ruby/protocol/RubyAccessMode.hh"
+#include "mem/ruby/protocol/RubyRequestType.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubyPort.hh"
output("SLICC v0.4")
output("Parsing...")
-    protocol_base = os.path.join(os.path.dirname(__file__), '..', 'protocol')
+    protocol_base = os.path.join(os.path.dirname(__file__),
+                                 '..', 'ruby', 'protocol')
    slicc = SLICC(slicc_file, protocol_base, verbose=True, debug=opts.debug,
                  traceback=opts.tb)
class $py_ident(RubyController):
    type = '$py_ident'
-    cxx_header = 'mem/protocol/${c_ident}.hh'
+    cxx_header = 'mem/ruby/protocol/${c_ident}.hh'
''')
code.indent()
for param in self.config_parameters:
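With cxx_header updated, the SimObject stub that SLICC emits for every state machine now points into the relocated tree. For a hypothetical machine(MachineType:L1Cache, ...) declaration, the expanded template would read roughly:

class L1Cache_Controller(RubyController):
    type = 'L1Cache_Controller'
    cxx_header = 'mem/ruby/protocol/L1Cache_Controller.hh'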
#include <sstream>
#include <string>
-#include "mem/protocol/TransitionResult.hh"
-#include "mem/protocol/Types.hh"
#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/protocol/TransitionResult.hh"
+#include "mem/ruby/protocol/Types.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "params/$c_ident.hh"
seen_types = set()
for var in self.objects:
    if var.type.ident not in seen_types and not var.type.isPrimitive:
-        code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
+        code('#include "mem/ruby/protocol/${{var.type.c_ident}}.hh"')
        seen_types.add(var.type.ident)
# for adding information to the protocol debug trace
#include <typeinfo>
#include "base/compiler.hh"
-#include "mem/ruby/common/BoolVec.hh"
#include "base/cprintf.hh"
+#include "mem/ruby/common/BoolVec.hh"
''')
for f in self.debug_flags:
    code('#include "debug/${{f}}.hh"')
code('''
-#include "mem/protocol/${ident}_Controller.hh"
-#include "mem/protocol/${ident}_Event.hh"
-#include "mem/protocol/${ident}_State.hh"
-#include "mem/protocol/Types.hh"
#include "mem/ruby/network/Network.hh"
+#include "mem/ruby/protocol/${ident}_Controller.hh"
+#include "mem/ruby/protocol/${ident}_Event.hh"
+#include "mem/ruby/protocol/${ident}_State.hh"
+#include "mem/ruby/protocol/Types.hh"
#include "mem/ruby/system/RubySystem.hh"
''')
seen_types = set()
for var in self.objects:
    if var.type.ident not in seen_types and not var.type.isPrimitive:
-        code('#include "mem/protocol/${{var.type.c_ident}}.hh"')
+        code('#include "mem/ruby/protocol/${{var.type.c_ident}}.hh"')
        seen_types.add(var.type.ident)
num_in_ports = len(self.in_ports)
for f in self.debug_flags:
    code('#include "debug/${{f}}.hh"')
code('''
-#include "mem/protocol/${ident}_Controller.hh"
-#include "mem/protocol/${ident}_Event.hh"
-#include "mem/protocol/${ident}_State.hh"
+#include "mem/ruby/protocol/${ident}_Controller.hh"
+#include "mem/ruby/protocol/${ident}_Event.hh"
+#include "mem/ruby/protocol/${ident}_State.hh"
''')
if outputRequest_types:
-    code('''#include "mem/protocol/${ident}_RequestType.hh"''')
+    code('''#include "mem/ruby/protocol/${ident}_RequestType.hh"''')
code('''
-#include "mem/protocol/Types.hh"
+#include "mem/ruby/protocol/Types.hh"
#include "mem/ruby/system/RubySystem.hh"
''')
#include "base/trace.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubyGenerated.hh"
-#include "mem/protocol/${ident}_Controller.hh"
-#include "mem/protocol/${ident}_Event.hh"
-#include "mem/protocol/${ident}_State.hh"
-#include "mem/protocol/Types.hh"
+#include "mem/ruby/protocol/${ident}_Controller.hh"
+#include "mem/ruby/protocol/${ident}_Event.hh"
+#include "mem/ruby/protocol/${ident}_State.hh"
+#include "mem/ruby/protocol/Types.hh"
#include "mem/ruby/system/RubySystem.hh"
#define HASH_FUN(state, event) ((int(state)*${ident}_Event_NUM)+int(event))
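HASH_FUN flattens a (state, event) pair into a single row-major index into the generated transition lookup table: state times the number of events, plus the event. The same computation in Python, with an illustrative event count:

EVENT_NUM = 12  # stands in for ${ident}_Event_NUM

def hash_fun(state, event):
    return state * EVENT_NUM + event

assert hash_fun(0, 3) == 3           # first row
assert hash_fun(2, 3) == 2 * 12 + 3  # third row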
for symbol in self.sym_vec:
    if isinstance(symbol, Type) and not symbol.isPrimitive:
-        code('#include "mem/protocol/${{symbol.c_ident}}.hh"')
+        code('#include "mem/ruby/protocol/${{symbol.c_ident}}.hh"')
code.write(path, "Types.hh")
#include <iostream>
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
+
''')
for dm in self.data_members.values():
    if not dm.type.isPrimitive:
-        code('#include "mem/protocol/$0.hh"', dm.type.c_ident)
+        code('#include "mem/ruby/protocol/$0.hh"', dm.type.c_ident)
parent = ""
if "interface" in self:
- code('#include "mem/protocol/$0.hh"', self["interface"])
+ code('#include "mem/ruby/protocol/$0.hh"', self["interface"])
parent = " : public %s" % self["interface"]
code('''
#include <iostream>
#include <memory>
-#include "mem/protocol/${{self.c_ident}}.hh"
+#include "mem/ruby/protocol/${{self.c_ident}}.hh"
#include "mem/ruby/system/RubySystem.hh"
using namespace std;
''')
if self.isStateDecl:
-    code('#include "mem/protocol/AccessPermission.hh"')
+    code('#include "mem/ruby/protocol/AccessPermission.hh"')
if self.isMachineType:
    code('#include <functional>')
#include <string>
#include "base/logging.hh"
-#include "mem/protocol/${{self.c_ident}}.hh"
+#include "mem/ruby/protocol/${{self.c_ident}}.hh"
using namespace std;
if self.isMachineType:
    for enum in self.enums.values():
        if enum.primary:
-            code('#include "mem/protocol/${{enum.ident}}_Controller.hh"')
+            code('#include "mem/ruby/protocol/${{enum.ident}}'
+                 '_Controller.hh"')
    code('#include "mem/ruby/common/MachineID.hh"')
code('''