// SR + {Load, Ifetch} -> S: service the load hit and pop the mandatory
// queue. The added ka_wakeUpAllDependents presumably reschedules requests
// stalled on this address once the line reaches the stable S state --
// TODO confirm against the action's definition elsewhere in this file.
transition(SR, {Load, Ifetch}, S) {
h_load_hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
transition({S, SR}, Store, SM) {
// OR + {Load, Ifetch} -> O: load hit, pop mandatory queue. The added
// ka_wakeUpAllDependents presumably wakes requests stalled on this
// address -- TODO confirm against the action's definition.
transition(OR, {Load, Ifetch}, O) {
h_load_hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
transition({O, OR}, Store, OM) {
}
// Transitions from Modified
- transition({MM, MMR}, {Load, Ifetch}, MM) {
+ // MM/M + {Load, Ifetch}, no state change: plain load hit. Split out
+ // from the old combined {MM, MMR} transition so the transient MMR case
+ // can additionally wake up dependents when it settles.
+ transition({MM, M}, {Load, Ifetch}) {
+ h_load_hit;
+ k_popMandatoryQueue;
+ }
+
+ // MM + Store, no state change: store hit serviced in place.
+ transition(MM, Store) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ // MMR + {Load, Ifetch} -> MM: load hit; additionally wakes dependents
+ // (ka_wakeUpAllDependents -- name suggests it reschedules requests
+ // stalled on this address; confirm against its definition).
+ transition(MMR, {Load, Ifetch}, MM) {
h_load_hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
- transition({MM, MMR}, Store, MM) {
+ // MMR + Store -> MM: store hit; also wakes dependents now that the
+ // line leaves transient MMR -- TODO confirm the wakeup action's
+ // semantics against its definition.
+ transition(MMR, Store, MM) {
hh_store_hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
transition({MM, M, MMR, MR}, Flush_line, MM_F) {
}
// Transitions from Dirty Exclusive
- transition({M, MR}, {Load, Ifetch}, M) {
+ // M + Store -> MM: dirty-exclusive line is upgraded to modified on a
+ // store hit (per the section comments above: M = Dirty Exclusive,
+ // MM = Modified). Split from the old combined {M, MR} transition.
+ transition(M, Store, MM) {
+ hh_store_hit;
+ k_popMandatoryQueue;
+ }
+
+ // MR + {Load, Ifetch} -> M: load hit plus wakeup of dependents
+ // (ka_wakeUpAllDependents added by this change -- confirm its
+ // semantics against the action definition).
+ transition(MR, {Load, Ifetch}, M) {
h_load_hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
- transition({M, MR}, Store, MM) {
+ // MR + Store -> MM: store hit plus wakeup of dependents; split from
+ // the old combined {M, MR} transition (the plain M case is handled by
+ // its own transition above).
+ transition(MR, Store, MM) {
hh_store_hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
transition(M, L2_Replacement, MI) {
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
+ check_allocate(TBEs);
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
}
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
+ check_allocate(TBEs);
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
+ DPRINTF(RubySlicc, "%s\n", in_msg.DataBlk);
+ DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
// NOTE: The following check would not be valid in a real
// implementation. We include the data in the "dataless"