void unset_cache_entry();
void set_tbe(TBE a);
void unset_tbe();
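+ // Assumed helper from gem5's stall/wake support: re-enqueues any messages
+ // previously parked on this address by stall_and_wait.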
+ void wakeUpBuffers(Address a);
// inclusive cache, returns L1 entries only
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
// Response IntraChip L1 Network - response msg to this L1 cache
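+ // rank orders the in_ports by priority: higher-ranked ports (responses,
+ // rank 2) are serviced before lower-ranked ones (requests, rank 1;
+ // mandatory queue, rank 0), and a port is assumed to wake only buffers
+ // stalled at a lower rank, per gem5's stall/wake support.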
- in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache) {
+ in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
if (responseIntraChipL1Network_in.isReady()) {
peek(responseIntraChipL1Network_in, ResponseMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
}
// Request IntraChip L1 Network - request msg to this L1 cache
- in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache) {
+ in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
if(requestIntraChipL1Network_in.isReady()) {
peek(requestIntraChipL1Network_in, RequestMsg, block_on="Address") {
assert(in_msg.Destination.isElement(machineID));
}
// Mandatory queue between the node's CPU and its L1 caches
- in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
+ in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
if (mandatoryQueue_in.isReady()) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
}
}
- action(z_stall, "z", desc="Stall") {
- }
-
action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory.deallocate(address);
}
}
- action(zz_recycleRequestQueue, "zz", desc="recycle L1 request queue") {
- requestIntraChipL1Network_in.recycle();
+ action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the mandatory queue") {
+ stall_and_wait(mandatoryQueue_in, address);
}
- action(z_recycleMandatoryQueue, "\z", desc="recycle L1 request queue") {
- mandatoryQueue_in.recycle();
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
}
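+ // Unlike the recycle() calls removed above, which re-polled the head
+ // message every cycle, stall_and_wait parks the message in a per-address
+ // stall buffer; kd_wakeUpDependents then re-enqueues it once the transient
+ // state resolves. (A sketch of the intended semantics, assuming gem5's
+ // stall-buffer support.)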
action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
//*****************************************************
// Transitions for Load/Store/Replacement/WriteBack from transient states
- transition({IS, IM, IS_I, M_I, SM}, {Load, Ifetch, Store, L1_Replacement}) {
- z_recycleMandatoryQueue;
+ transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
+ z_stallAndWaitMandatoryQueue;
}
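+ // SINK_WB_ACK joins this stall set; its separate Load/Store/Ifetch/
+ // L1_Replacement transition is deleted further below.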
// Transitions from Idle
transition(M_I, WB_Ack, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(M, Inv, I) {
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS_I, Data_all_Acks, I) {
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS, DataS_fromL1, S) {
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS_I, DataS_fromL1, I) {
h_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
// directory is blocked when sending exclusive data
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(IS, Data_Exclusive, E) {
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
// Transitions from IM
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
// transitions from SM
hh_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
- }
-
- transition(SINK_WB_ACK, {Load, Store, Ifetch, L1_Replacement}){
- z_recycleMandatoryQueue;
+ kd_wakeUpDependents;
}
transition(SINK_WB_ACK, Inv){
l_popRequestQueue;
}
- transition(SINK_WB_ACK, WB_Ack){
+ transition(SINK_WB_ACK, WB_Ack, I){
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
}
void unset_cache_entry();
void set_tbe(TBE a);
void unset_tbe();
+ void wakeUpBuffers(Address a);
// inclusive cache, returns L2 entries only
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
- in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache) {
+ in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
if(L1unblockNetwork_in.isReady()) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.Address);
}
// Response IntraChip L2 Network - response msg to this particular L2 bank
- in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
+ in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
if (responseIntraChipL2Network_in.isReady()) {
peek(responseIntraChipL2Network_in, ResponseMsg) {
// test whether it's from a local L1 or an off-chip source
}
// L1 Request
- in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
+ in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
if(L1RequestIntraChipL2Network_in.isReady()) {
peek(L1RequestIntraChipL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.Address);
}
}
- action(zz_recycleL1RequestQueue, "zz", desc="recycle L1 request queue") {
- L1RequestIntraChipL2Network_in.recycle();
+ action(zz_stallAndWaitL1RequestQueue, "zz", desc="Stall and wait the L1 request queue") {
+ stall_and_wait(L1RequestIntraChipL2Network_in, address);
}
action(zn_recycleResponseNetwork, "zn", desc="recycle response network") {
responseIntraChipL2Network_in.recycle();
}
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
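+ // At L2, kd_wakeUpDependents is appended to every transition leaving a
+ // blocking state (SS_MB, MT_MB, MT_IB, ...), so L1 requests stalled on
+ // that address resume immediately.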
//*****************************************************
// TRANSITIONS
}
transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
transition({SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
e_sendDataToGetSRequestors;
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(IM, Mem_Data, MT_MB) {
}
transition({IS, ISS}, L1_GETX) {
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
// transitions from SS
// transitions from blocking states
transition(SS_MB, Unblock_Cancel, SS) {
k_popUnblockQueue;
+ kd_wakeUpDependents;
}
transition(MT_MB, Unblock_Cancel, MT) {
k_popUnblockQueue;
+ kd_wakeUpDependents;
}
transition(MT_IB, Unblock_Cancel, MT) {
k_popUnblockQueue;
+ kd_wakeUpDependents;
}
transition(SS_MB, Exclusive_Unblock, MT) {
// update actual directory
mmu_markExclusiveFromUnblock;
k_popUnblockQueue;
+ kd_wakeUpDependents;
}
transition({M_MB, MT_MB}, Exclusive_Unblock, MT) {
// update actual directory
mmu_markExclusiveFromUnblock;
k_popUnblockQueue;
+ kd_wakeUpDependents;
}
transition(MT_IIB, {L1_PUTX, L1_PUTX_old}){
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
transition(MT_IIB, Unblock, MT_IB) {
transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
m_writeDataToCache;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(MT_SB, Unblock, SS) {
nnu_addSharerFromUnblock;
k_popUnblockQueue;
+ kd_wakeUpDependents;
}
// writeback states
transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
transition(I_I, Ack) {
}
transition(MCT_I, {L1_PUTX, L1_PUTX_old}){
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
// L1 never changed Dirty data
}
transition(MT_I, {L1_PUTX, L1_PUTX_old}){
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
// possible race between unblock and immediate replacement
transition({MT_MB,SS_MB}, {L1_PUTX, L1_PUTX_old}) {
- zz_recycleL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
transition(MT_I, WB_Data_clean, NP) {
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
transition(S_I, Ack) {
transition(M_I, Mem_Ack, NP) {
s_deallocateTBE;
o_popIncomingResponseQueue;
+ kd_wakeUpDependents;
}
}
void set_tbe(TBE tbe);
void unset_tbe();
+ void wakeUpBuffers(Address a);
Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
// ** IN_PORTS **
- in_port(requestNetwork_in, RequestMsg, requestToDir) {
+ in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
}
}
- in_port(responseNetwork_in, ResponseMsg, responseToDir) {
+ in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, memBuffer, rank = 2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
}
-
// Actions
action(a_sendAck, "a", desc="Send ack to L2") {
peek(responseNetwork_in, ResponseMsg) {
memQueue_in.dequeue();
}
+ action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
+ wakeUpBuffers(address);
+ }
+
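+ // Directory-side wakeups: requests stalled while a memory or DMA operation
+ // was in flight (ID, ID_W, IM, MI, ...) resume once the memory ack/data
+ // is popped.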
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
}
}
- action(z_recycleRequestQueue, "z", desc="recycle request queue") {
- requestNetwork_in.recycle();
+ action(z_stallAndWaitRequest, "z", desc="Stall and wait the request queue") {
+ stall_and_wait(requestNetwork_in, address);
}
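+ // Note: the DMA queue below still uses recycle(); only the address-indexed
+ // request path is converted to stall_and_wait in this change.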
action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
transition(IM, Memory_Data, M) {
d_sendData;
l_popMemQueue;
+ kd_wakeUpDependents;
}
//added by SS
transition(M, CleanReplacement, I) {
c_clearOwner;
aa_sendAck;
l_popMemQueue;
+ kd_wakeUpDependents;
}
transition(ID, Memory_Data, I) {
dr_sendDMAData;
l_popMemQueue;
+ kd_wakeUpDependents;
}
transition(I, DMA_WRITE, ID_W) {
transition(ID_W, Memory_Ack, I) {
da_sendDMAAck;
l_popMemQueue;
+ kd_wakeUpDependents;
}
transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data} ) {
- z_recycleRequestQueue;
+ z_stallAndWaitRequest;
}
transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ} ) {
aa_sendAck;
c_clearOwner;
l_popMemQueue;
+ kd_wakeUpDependents;
}
transition(M, DMA_WRITE, M_DWR) {
da_sendDMAAck;
w_deallocateTBE;
l_popMemQueue;
+ kd_wakeUpDependents;
}
}