3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * $Id: MOSI_directory_1level-cache.sm 1.18 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
// NOTE(review): SLICC protocol specification, not CMake. The leading integer on
// each line is the original file line number (extraction artifact), and lines
// containing only "}" or blanks were dropped from this extract — confirm
// against the full MOSI_directory_1level-cache.sm before editing.
// L1 cache controller state machine for a one-level MOSI directory protocol.
35 machine(L1Cache, "MOSI Directory Optimized") {
// Outbound virtual networks: requests to the directory on VN0, data/ack
// responses on VN2; both unordered.
37 MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
38 MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
// Inbound: directory-forwarded requests arrive point-to-point ORDERED on VN1
// (ordering matters for correctness of forwarded GETS/GETX/INV streams);
// responses arrive unordered on VN2.
40 MessageBuffer forwardedRequestToCache, network="From", virtual_network="1", ordered="true";
41 MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
// Cache line states. Default for untracked lines is Invalid (I).
// NOTE(review): the declarations of the stable states I, S, and O are not
// visible in this extract (their lines were dropped), but they are referenced
// by setState and the transitions below — confirm against the full file.
44 enumeration(State, desc="Cache states", default="L1Cache_State_I") {
49 M, desc="Modified", format="!b";
// Writeback-in-flight states: PUTX issued, awaiting the directory's WB ack.
52 MI, desc="modified, issued PUTX, have not seen response yet";
53 OI, desc="owned, issued PUTX, have not seen response yet";
// Read-miss states; ISI additionally records that an INV arrived while the
// GETS was outstanding, so the received data may be used once and discarded.
55 IS, desc="idle, issued GETS, have not seen response yet";
56 ISI, desc="idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
// Write-miss states; the suffix records forwarded requests observed while the
// GETX is outstanding (trailing O = saw a GETS, trailing I = saw a GETX).
58 IM, desc="idle, issued GETX, have not seen response yet";
59 IMI, desc="idle, issued GETX, saw forwarded GETX";
60 IMO, desc="idle, issued GETX, saw forwarded GETS";
61 IMOI, desc="idle, issued GETX, saw forwarded GETS, saw forwarded GETX";
63 // Note: OM is a strange state, because it is waiting for the line
64 // to be stolen away, or look like it has been stolen away. The
65 // common case is that we see a forward from the directory that is
66 // really from us, we forwarded the data to our dataqueue, and
67 // everythings works fine.
69 OM, desc="owned, issued GETX, have not seen response yet";
// Events that drive the state machine. Grouped as: processor-side requests,
// directory-forwarded requests, and response-network messages.
73 enumeration(Event, desc="Cache events") {
// Demand and prefetch requests from the local processor (mandatory/optional
// queues below), plus replacement of a victim line to make room.
74 Load, desc="Load request from the processor";
75 Load_prefetch, desc="Load prefetch request from the processor";
76 Ifetch, desc="I-fetch request from the processor";
77 Store_prefetch, desc="Store prefetch request from the processor";
78 Store, desc="Store request from the processor";
79 Replacement, desc="Replacement", format="!r";
// Requests forwarded by the directory on the ordered VN1 network.
81 Forwarded_GETS, "Forwarded GETS", desc="Directory forwards GETS to us";
82 Forwarded_GETX, "Forwarded GETX", desc="Directory forwards GETX to us";
83 INV, "INV", desc="Invalidation", format="!r";
// Invalidation acks from peer caches; "last" means the TBE's pending-ack
// count indicates this is the final one (see responseNetwork_in).
85 Proc_ack, "Proc ack", desc="Ack from proc";
86 Proc_last_ack, "Proc last ack", desc="Last ack", format="!r";
// Data responses, classified by how many invalidation acks must still be
// collected before the line is fully owned (see responseNetwork_in).
88 Data_ack_0, "Data ack 0", desc="Data with ack count = 0";
89 Data_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks first";
90 Data_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
// Directory acknowledges our PUTX writeback; completes MI/OI -> I.
92 Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
// Per-line cache array entry: just coherence state plus the data block.
98 structure(Entry, desc="...", interface="AbstractCacheEntry") {
99 State CacheState, desc="cache state";
100 DataBlock DataBlk, desc="data for the block";
// Transaction Buffer Entry: tracks one outstanding miss/writeback per address.
// Holds the transient state, a data buffer for in-flight writebacks, the
// invalidation-ack countdown, and any forwarded requests that arrived while
// the transaction was pending and must be serviced once data arrives.
104 structure(TBE, desc="...") {
105 Address Address, desc="Physical address for this TBE";
106 State TBEState, desc="Transient state";
107 DataBlock DataBlk, desc="Buffer for the data block";
// Can go negative when acks arrive before the data message (see actions p/q).
108 int NumPendingAcks, desc="Number of acks that this processor is waiting for";
109 NetDest ForwardGetS_IDs, desc="Set of the processors to forward the block";
110 MachineID ForwardGetX_ID, desc="ID of the processor to forward the block";
111 int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
112 bool isPrefetch, desc="Set if this was caused by a prefetch";
// Interface to the C++ cache array implementation.
115 external_type(CacheMemory) {
// cacheAvail: can a new line for this address be allocated without eviction?
// cacheProbe: which victim address would be evicted to make room?
116 bool cacheAvail(Address);
117 Address cacheProbe(Address);
118 void allocate(Address);
119 void deallocate(Address);
120 Entry lookup(Address);
121 void changePermission(Address, AccessPermission);
122 bool isTagPresent(Address);
// Interface to the C++ TBE table (outstanding-transaction tracking).
125 external_type(TBETable) {
127 void allocate(Address);
128 void deallocate(Address);
129 bool isPresent(Address);
// Processor-side queues: demand requests (mandatory) and prefetches
// (optional), plus the sequencer/store-buffer callbacks into the CPU model.
132 MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
133 MessageBuffer optionalQueue, ordered="false", abstract_chip_ptr="true";
134 Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
135 StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
// Controller-owned storage: TBE table and the L1 data/tag array.
137 TBETable TBEs, template_hack="<L1Cache_TBE>";
138 CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified L1"', abstract_chip_ptr="true";
// Report the coherence state for an address. A pending TBE (transient state)
// takes precedence over the cache array; otherwise the cached state is used.
// NOTE(review): the trailing else-branch / closing braces (orig. lines
// 145-148) are not visible in this extract — confirm the untracked-address
// fallback (presumably State:I) against the full file.
140 State getState(Address addr) {
141 if(TBEs.isPresent(addr)) {
142 return TBEs[addr].TBEState;
143 } else if (cacheMemory.isTagPresent(addr)) {
144 return cacheMemory[addr].CacheState;
// Record a new state in the TBE and/or cache entry, and derive the access
// permission the CPU-visible cache reports for the line.
149 void setState(Address addr, State state) {
150 if (TBEs.isPresent(addr)) {
151 TBEs[addr].TBEState := state;
153 if (cacheMemory.isTagPresent(addr)) {
154 cacheMemory[addr].CacheState := state;
// Permission mapping: I and the writeback-pending states are Invalid
// (data already copied to the TBE); S/O are read-only; M is read-write;
// every other (transient) state is Busy.
157 if ((state == State:I) || (state == State:MI) || (state == State:OI)) {
158 cacheMemory.changePermission(addr, AccessPermission:Invalid);
159 } else if (state == State:S || state == State:O) {
160 cacheMemory.changePermission(addr, AccessPermission:Read_Only);
161 } else if (state == State:M) {
162 cacheMemory.changePermission(addr, AccessPermission:Read_Write);
164 cacheMemory.changePermission(addr, AccessPermission:Busy);
// Output ports onto the outbound virtual networks declared above.
171 out_port(requestNetwork_out, RequestMsg, requestFromCache);
172 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
// Response network (VN2): classify incoming DATA/ACK messages into events.
177 in_port(responseNetwork_in, ResponseMsg, responseToCache) {
178 if (responseNetwork_in.isReady()) {
179 peek(responseNetwork_in, ResponseMsg) {
180 if(in_msg.Type == CoherenceResponseType:DATA) {
// DATA carries the number of invalidation acks we must still collect.
181 if(in_msg.NumPendingAcks == 0) {
182 trigger(Event:Data_ack_0, in_msg.Address);
// The TBE count is decremented by each early-arriving ack (action q), so it
// can be negative here; a zero SUM means every ack already arrived and this
// data message completes the transaction ("..._last").
184 if(in_msg.NumPendingAcks + TBEs[in_msg.Address].NumPendingAcks != 0) {
185 trigger(Event:Data_ack_not_0, in_msg.Address);
187 trigger(Event:Data_ack_not_0_last, in_msg.Address);
// An ACK when exactly one is still pending is the final one.
190 } else if(in_msg.Type == CoherenceResponseType:ACK) {
191 if(TBEs[in_msg.Address].NumPendingAcks != 1){
192 trigger(Event:Proc_ack, in_msg.Address);
194 trigger(Event:Proc_last_ack, in_msg.Address);
201 // Forwarded Request network
// Ordered VN1: directory-forwarded GETS/GETX, invalidations, and WB acks.
202 in_port(forwardedRequestNetwork_in, RequestMsg, forwardedRequestToCache) {
203 if(forwardedRequestNetwork_in.isReady()) {
204 peek(forwardedRequestNetwork_in, RequestMsg) {
205 if(in_msg.Type == CoherenceRequestType:GETS) {
206 trigger(Event:Forwarded_GETS, in_msg.Address);
207 } else if (in_msg.Type == CoherenceRequestType:GETX) {
208 trigger(Event:Forwarded_GETX, in_msg.Address);
209 } else if (in_msg.Type == CoherenceRequestType:INV) {
210 trigger(Event:INV, in_msg.Address);
211 } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
212 trigger(Event:Dir_WB_ack, in_msg.Address);
214 error("Invalid forwarded request type");
// Demand requests from the processor. If the target set has no free way, a
// Replacement event is triggered FOR THE VICTIM address (cacheProbe) first;
// the original request stays queued and is retried after the eviction.
221 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
222 if (mandatoryQueue_in.isReady()) {
223 peek(mandatoryQueue_in, CacheMsg) {
224 if (cacheMemory.cacheAvail(in_msg.Address) == false) {
225 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
227 if (in_msg.Type == CacheRequestType:LD) {
228 trigger(Event:Load, in_msg.Address);
229 } else if (in_msg.Type == CacheRequestType:IFETCH) {
230 trigger(Event:Ifetch, in_msg.Address);
// ATOMICs are treated as stores: they need exclusive (M) access.
231 } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
232 trigger(Event:Store, in_msg.Address);
234 error("Invalid CacheRequestType");
// Prefetch requests: same replacement-first logic, but LD and IFETCH both
// map to Load_prefetch, and ST/ATOMIC to Store_prefetch (no CPU callback).
242 in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
243 if (optionalQueue_in.isReady()) {
244 peek(optionalQueue_in, CacheMsg) {
245 if (cacheMemory.cacheAvail(in_msg.Address) == false) {
246 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
248 if (in_msg.Type == CacheRequestType:LD) {
249 trigger(Event:Load_prefetch, in_msg.Address);
250 } else if (in_msg.Type == CacheRequestType:IFETCH) {
251 trigger(Event:Load_prefetch, in_msg.Address);
252 } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
253 trigger(Event:Store_prefetch, in_msg.Address);
255 error("Invalid CacheRequestType");
// Send a read-shared request to the home directory.
264 action(a_issueGETS, "a", desc="Issue GETS") {
265 enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
266 out_msg.Address := address;
267 out_msg.Type := CoherenceRequestType:GETS;
268 out_msg.Requestor := machineID;
269 out_msg.Destination.add(map_Address_to_Directory(address));
270 out_msg.MessageSize := MessageSizeType:Control;
// Send a read-exclusive request to the home directory.
274 action(b_issueGETX, "b", desc="Issue GETX") {
275 enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
276 out_msg.Address := address;
277 out_msg.Type := CoherenceRequestType:GETX;
278 out_msg.Requestor := machineID;
279 out_msg.Destination.add(map_Address_to_Directory(address));
280 out_msg.MessageSize := MessageSizeType:Control;
// Write the dirty block back to the directory. Reads the data from the cache
// array, so it must run BEFORE the entry is deallocated/copied to the TBE.
284 action(d_issuePUTX, "d", desc="Issue PUTX") {
285 enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
286 out_msg.Address := address;
287 out_msg.Type := CoherenceRequestType:PUTX;
288 out_msg.Requestor := machineID;
289 out_msg.Destination.add(map_Address_to_Directory(address));
290 out_msg.DataBlk := cacheMemory[address].DataBlk;
291 out_msg.MessageSize := MessageSizeType:Data;
// Satisfy a forwarded request with data from the CACHE array (line still
// present); the requestor's identity and ack count come from the peeked
// forwarded-request message, so this must run before the queue is popped.
295 action(e_dataFromCacheToRequestor, "e", desc="Send data from cache to requestor") {
296 peek(forwardedRequestNetwork_in, RequestMsg) {
297 enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
298 out_msg.Address := address;
299 out_msg.Type := CoherenceResponseType:DATA;
300 out_msg.Sender := machineID;
301 out_msg.SenderMachine := MachineType:L1Cache;
302 out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state O and we see a GetX
303 out_msg.Destination.add(in_msg.Requestor);
304 out_msg.DestMachine := MachineType:L1Cache;
305 DEBUG_EXPR(out_msg.Destination);
306 out_msg.DataBlk := cacheMemory[address].DataBlk;
307 out_msg.MessageSize := MessageSizeType:Data;
// Allocate a cache entry if the tag is not already present (idempotent).
312 action(g_allocateCacheBlock, "g", desc="Allocate cache block") {
313 if (cacheMemory.isTagPresent(address) == false) {
314 cacheMemory.allocate(address);
// Complete a load at the CPU — but only for demand requests; prefetches
// (TBE present with isPrefetch set) produce no sequencer callback.
318 action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
319 DEBUG_EXPR(cacheMemory[address].DataBlk);
320 if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
322 sequencer.readCallback(address, cacheMemory[address].DataBlk);
324 // Prefetch - don't call back
// Same as h_load_hit but for stores (writeCallback).
328 action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
329 DEBUG_EXPR(cacheMemory[address].DataBlk);
330 if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
332 sequencer.writeCallback(address, cacheMemory[address].DataBlk);
334 // Prefetch - don't call back
// Allocate and initialize a TBE for a new transaction.
338 action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
339 check_allocate(TBEs);
340 TBEs.allocate(address);
341 TBEs[address].NumPendingAcks := 0; // default value
342 TBEs[address].isPrefetch := false;
343 TBEs[address].ForwardGetS_IDs.clear();
// Mark the current transaction as prefetch-initiated (suppresses callbacks).
346 action(j_setPrefetchBit, "j", desc="Set prefetch bit") {
347 TBEs[address].isPrefetch := true;
// Queue-pop actions: consume the head message of each input port.
350 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
351 mandatoryQueue_in.dequeue();
354 action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
355 forwardedRequestNetwork_in.dequeue();
358 action(m_popOptionalQueue, "m", desc="Pop optional queue") {
359 optionalQueue_in.dequeue();
362 action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
363 responseNetwork_in.dequeue();
// Fold the ack count carried by a DATA message into the TBE. Paired with
// q_decrement... below, the TBE count reaches exactly zero once the data
// message AND all invalidation acks have arrived, in either order.
366 action(p_addNumberOfPendingAcks, "p", desc="Add number of pending acks to TBE") {
367 peek(responseNetwork_in, ResponseMsg) {
368 DEBUG_EXPR(TBEs[address].NumPendingAcks);
369 TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks + in_msg.NumPendingAcks;
370 DEBUG_EXPR(in_msg.NumPendingAcks);
371 DEBUG_EXPR(TBEs[address].NumPendingAcks);
// Count one received invalidation ack (may drive the count negative if acks
// arrive before the data message — see responseNetwork_in).
375 action(q_decrementNumberOfPendingAcks, "q", desc="Decrement number of pending invalidations by one") {
376 DEBUG_EXPR(TBEs[address].NumPendingAcks);
377 TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks - 1;
378 DEBUG_EXPR(TBEs[address].NumPendingAcks);
// Release the TBE when a transaction completes.
381 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
382 TBEs.deallocate(address);
// Acknowledge an invalidation back to the requestor named in the forwarded
// message (must run before the forwarded-request queue is popped).
385 action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
386 peek(forwardedRequestNetwork_in, RequestMsg) {
387 enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
388 out_msg.Address := address;
389 out_msg.Type := CoherenceResponseType:ACK;
390 out_msg.Sender := machineID;
391 out_msg.SenderMachine := MachineType:L1Cache;
392 out_msg.Destination.add(in_msg.Requestor);
393 out_msg.DestMachine := MachineType:L1Cache;
394 DEBUG_EXPR(out_msg.Destination);
395 out_msg.NumPendingAcks := 0;
396 out_msg.MessageSize := MessageSizeType:Control;
// Install the data from an incoming response into the cache array.
401 action(u_writeDataToCache, "u", desc="Write data to cache") {
402 peek(responseNetwork_in, ResponseMsg) {
403 cacheMemory[address].DataBlk := in_msg.DataBlk;
// Stash the data in the TBE prior to deallocating the cache entry, so
// writeback-in-flight states (MI/OI) can still supply it (see y below).
407 action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
408 TBEs[address].DataBlk := cacheMemory[address].DataBlk;
// Like e_dataFromCacheToRequestor, but sources the data from the TBE buffer —
// used in MI/OI where the cache entry has already been deallocated.
411 action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
412 peek(forwardedRequestNetwork_in, RequestMsg) {
413 enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
414 out_msg.Address := address;
415 out_msg.Type := CoherenceResponseType:DATA;
416 out_msg.Sender := machineID;
417 out_msg.SenderMachine := MachineType:L1Cache;
418 out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state MS and we see a GetX
419 out_msg.Destination.add(in_msg.Requestor);
420 out_msg.DestMachine := MachineType:L1Cache;
421 DEBUG_EXPR(out_msg.Destination);
422 out_msg.DataBlk := TBEs[address].DataBlk;
423 out_msg.MessageSize := MessageSizeType:Data;
// Deliberately empty: leaves the triggering message queued for retry.
428 action(z_stall, "z", desc="Stall") {
// Remember a GETS requestor seen while our GETX is outstanding; serviced
// later by ee_dataFromCacheToGetSForwardIDs once data arrives.
431 action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
432 peek(forwardedRequestNetwork_in, RequestMsg) {
433 TBEs[address].ForwardGetS_IDs.add(in_msg.Requestor);
// Forward the (now valid) block to every recorded GETS requestor at once.
437 action(ee_dataFromCacheToGetSForwardIDs, "\e", desc="Send data from cache to GetS ForwardIDs") {
438 // FIXME - In some cases this should be from the TBE, not the cache.
439 enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
440 out_msg.Address := address;
441 out_msg.Type := CoherenceResponseType:DATA;
442 out_msg.Sender := machineID;
443 out_msg.SenderMachine := MachineType:L1Cache;
444 out_msg.Destination := TBEs[address].ForwardGetS_IDs;
445 out_msg.DestMachine := MachineType:L1Cache;
446 DEBUG_EXPR(out_msg.Destination);
447 out_msg.DataBlk := cacheMemory[address].DataBlk;
448 out_msg.NumPendingAcks := 0;
449 out_msg.MessageSize := MessageSizeType:Data;
453 action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
454 cacheMemory.deallocate(address);
// Forward the block to the recorded GETX requestor, passing along the ack
// count its GETX still requires (recorded by ii below).
457 action(gg_dataFromCacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
458 // FIXME - In some cases this should be from the TBE, not the cache.
459 enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
460 out_msg.Address := address;
461 out_msg.Type := CoherenceResponseType:DATA;
462 out_msg.Sender := machineID;
463 out_msg.SenderMachine := MachineType:L1Cache;
464 out_msg.Destination.add(TBEs[address].ForwardGetX_ID);
465 out_msg.DestMachine := MachineType:L1Cache;
466 DEBUG_EXPR(out_msg.Destination);
467 out_msg.DataBlk := cacheMemory[address].DataBlk;
468 out_msg.NumPendingAcks := TBEs[address].ForwardGetX_AckCount;
469 out_msg.MessageSize := MessageSizeType:Data;
// Remember a GETX requestor (and its ack count) seen while our own GETX is
// outstanding; serviced later by gg above.
473 action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
474 peek(forwardedRequestNetwork_in, RequestMsg) {
475 TBEs[address].ForwardGetX_ID := in_msg.Requestor;
476 TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingAcks;
480 //*****************************************************
482 //*****************************************************
// NOTE(review): throughout the transitions below, some action lines (and the
// lone "}" closers) were dropped by extraction — e.g. the a_issueGETS /
// i_allocateTBE / k_popMandatoryQueue actions expected inside the I->IS
// transition. Verify each transition body against the full file.
484 // Transitions for Load/Store/Prefetch/Replacement from transient states
// Processor requests arriving during a transient state simply stall
// (z_stall leaves the message queued; body not visible here).
485 transition({OM, OI, IS, ISI, IM, IMO, IMOI, IMI, MI}, {Load, Load_prefetch, Ifetch, Store, Store_prefetch, Replacement}) {
489 // Transitions from Idle
490 transition(I, {Load,Ifetch}, IS) {
491 g_allocateCacheBlock;
497 transition(I, {Load_prefetch}, IS) {
498 g_allocateCacheBlock;
505 transition(I, Store, IM) {
506 g_allocateCacheBlock;
512 transition(I, Store_prefetch, IM) {
513 g_allocateCacheBlock;
// Replacing an I line: just drop it; no writeback needed.
520 transition(I, Replacement) {
521 ff_deallocateCacheBlock;
// NOTE(review): the header for the transition containing the next two actions
// is missing from this extract (orig. lines 522-524); from context it is the
// I-state INV handler — confirm against the full file.
525 t_sendAckToInvalidator;
526 l_popForwardedRequestQueue;
529 // Transitions from Shared
// Read hits in S/O (and prefetch hits in S/O/M) complete locally.
530 transition({S, O}, {Load,Ifetch}) {
535 transition({S, O, M}, Load_prefetch) {
// Store to a shared line: upgrade via GETX (actions elided in extract).
539 transition(S, Store, IM) {
545 transition(S, Store_prefetch, IM) {
// S is clean — replacement and invalidation need no data transfer.
552 transition(S, Replacement, I) {
553 ff_deallocateCacheBlock;
556 transition(S, INV, I) {
557 t_sendAckToInvalidator;
558 l_popForwardedRequestQueue;
561 // Transitions from Modified
// All processor accesses hit locally in M (callback actions elided here).
562 transition(M, {Load, Ifetch}) {
567 transition(M, Store) {
572 transition(M, Store_prefetch) {
// Evicting dirty data: copy it into a TBE, free the cache entry, and wait
// in MI for the directory's WB ack (d_issuePUTX expected but not visible).
576 transition(M, Replacement, MI) {
579 x_copyDataFromCacheToTBE;
580 ff_deallocateCacheBlock;
// Forwarded GETS while M: supply data and downgrade to Owned (we keep
// responsibility for the dirty data).
583 transition(M, Forwarded_GETS, O) {
584 e_dataFromCacheToRequestor;
585 l_popForwardedRequestQueue;
// Forwarded GETX while M: hand over the line entirely.
588 transition(M, Forwarded_GETX, I) {
589 e_dataFromCacheToRequestor;
590 l_popForwardedRequestQueue;
593 // Transitions from O
// Store while Owned: need exclusivity; go to OM and collect acks.
594 transition(O, Store, OM) {
600 transition(O, Store_prefetch, OM) {
// Evicting Owned (dirty) data: same TBE-buffered writeback as M->MI.
607 transition(O, Replacement, OI){
610 x_copyDataFromCacheToTBE;
611 ff_deallocateCacheBlock;
// Owner services forwarded GETS without changing state.
614 transition(O, Forwarded_GETS) {
615 e_dataFromCacheToRequestor;
616 l_popForwardedRequestQueue;
619 transition(O, Forwarded_GETX, I) {
620 e_dataFromCacheToRequestor;
621 l_popForwardedRequestQueue;
624 // transitions from OI
// Writeback in flight: forwarded requests that raced with our PUTX are
// served from the TBE copy (the cache entry is already deallocated).
626 transition(OI, Forwarded_GETS) {
627 y_dataFromTBEToRequestor;
628 l_popForwardedRequestQueue;
631 transition(OI, Forwarded_GETX) {
632 y_dataFromTBEToRequestor;
633 l_popForwardedRequestQueue;
// Directory acknowledged the PUTX; transaction complete.
636 transition(OI, Dir_WB_ack, I) {
638 l_popForwardedRequestQueue;
641 // Transitions from IS
// INV racing our GETS: ack it, remember via ISI that the data we receive
// is already stale for caching purposes.
643 transition(IS, INV, ISI) {
644 t_sendAckToInvalidator;
645 l_popForwardedRequestQueue;
648 transition(IS, Data_ack_0, S) {
652 o_popIncomingResponseQueue;
655 // transitions from ISI
657 // in ISI, could get data from the Proc whose GETX caused INV to go from IS to ISI
658 // or, could get data from Dir if Dir's data lost race to Dir's INV
659 // or, could get data from Dir, if my GETS took forever to get to Dir, and the GETX
660 // processor already wrote it back
// Data finally arrives: satisfy the load once, then end in I (invalidated).
662 transition(ISI, Data_ack_0, I) {
666 o_popIncomingResponseQueue;
// Further INVs in ISI are acked but change nothing.
669 transition(ISI, INV) {
670 t_sendAckToInvalidator;
671 l_popForwardedRequestQueue;
674 // Transitions from IM
676 transition(IM, INV) { // do not need to go to IMI, since INV is for earlier epoch
677 t_sendAckToInvalidator;
678 l_popForwardedRequestQueue;
// Record GETS requestors to forward to once our data arrives; note the
// self-loop form ({IM, IMO} -> IMO) accumulates multiple requestors.
681 transition({IM, IMO}, Forwarded_GETS, IMO) {
682 dd_recordGetSForwardID;
683 l_popForwardedRequestQueue;
686 transition(IM, Forwarded_GETX, IMI) {
687 ii_recordGetXForwardID;
688 l_popForwardedRequestQueue;
// Data with no (remaining) acks outstanding: the store can complete.
691 transition(IM, {Data_ack_0, Data_ack_not_0_last}, M) {
695 o_popIncomingResponseQueue;
// Data arrived but acks are still outstanding: record the ack target count
// and keep waiting in IM.
698 transition(IM, Data_ack_not_0) {
700 p_addNumberOfPendingAcks;
701 o_popIncomingResponseQueue;
704 transition(IM, Proc_ack) {
705 q_decrementNumberOfPendingAcks;
706 o_popIncomingResponseQueue;
709 transition(IM, Proc_last_ack, M) {
712 o_popIncomingResponseQueue;
715 // transitions from IMO
// A GETX arriving after recorded GETS requestors: remember it too (IMOI).
717 transition(IMO, Forwarded_GETX, IMOI) {
718 ii_recordGetXForwardID;
719 l_popForwardedRequestQueue;
// Transaction completes: forward data to all recorded GETS requestors and
// end as Owner (we still hold the dirty data for sharers).
722 transition(IMO, {Data_ack_0, Data_ack_not_0_last}, O) {
725 ee_dataFromCacheToGetSForwardIDs;
727 o_popIncomingResponseQueue;
730 transition(IMO, Data_ack_not_0) {
732 p_addNumberOfPendingAcks;
733 o_popIncomingResponseQueue;
736 transition(IMO, Proc_ack) {
737 q_decrementNumberOfPendingAcks;
738 o_popIncomingResponseQueue;
741 transition(IMO, Proc_last_ack, O) {
743 ee_dataFromCacheToGetSForwardIDs;
745 o_popIncomingResponseQueue;
748 // transitions from IMI
// Transaction completes but a GETX was recorded: pass the line straight to
// that requestor (with its ack count) and end Invalid.
750 transition(IMI, {Data_ack_0, Data_ack_not_0_last}, I) {
753 gg_dataFromCacheToGetXForwardID;
755 o_popIncomingResponseQueue;
758 transition(IMI, Data_ack_not_0) {
760 p_addNumberOfPendingAcks;
761 o_popIncomingResponseQueue;
764 transition(IMI, Proc_ack) {
765 q_decrementNumberOfPendingAcks;
766 o_popIncomingResponseQueue;
769 transition(IMI, Proc_last_ack, I) {
771 gg_dataFromCacheToGetXForwardID;
773 o_popIncomingResponseQueue;
776 // transitions from IMOI
// Both GETS requestors and a GETX were recorded: service the GETS set first,
// then hand the line to the GETX requestor, ending Invalid.
778 transition(IMOI, {Data_ack_0, Data_ack_not_0_last}, I) {
781 ee_dataFromCacheToGetSForwardIDs;
782 gg_dataFromCacheToGetXForwardID;
784 o_popIncomingResponseQueue;
787 transition(IMOI, Data_ack_not_0) {
789 p_addNumberOfPendingAcks;
790 o_popIncomingResponseQueue;
793 transition(IMOI, Proc_ack) {
794 q_decrementNumberOfPendingAcks;
795 o_popIncomingResponseQueue;
798 transition(IMOI, Proc_last_ack, I) {
800 ee_dataFromCacheToGetSForwardIDs;
801 gg_dataFromCacheToGetXForwardID;
803 o_popIncomingResponseQueue;
806 // Transitions from OM
// Waiting for acks as prior Owner (see the OM note in the State enum).
807 transition(OM, Proc_ack) {
808 q_decrementNumberOfPendingAcks;
809 o_popIncomingResponseQueue;
// Still the owner: service forwarded GETS in place.
812 transition(OM, Forwarded_GETS) {
813 e_dataFromCacheToRequestor;
814 l_popForwardedRequestQueue;
// Ownership stolen by a competing GETX: hand over the data and fall back
// to IM to wait for our own GETX's data/acks.
817 transition(OM, Forwarded_GETX, IM) {
818 e_dataFromCacheToRequestor;
819 l_popForwardedRequestQueue;
822 // Transitions from MI
// Writeback in flight from M: forwarded requests that raced with the PUTX
// are served from the TBE's buffered copy.
824 transition(MI, Forwarded_GETS) {
825 y_dataFromTBEToRequestor;
826 l_popForwardedRequestQueue;
829 transition(MI, Forwarded_GETX) {
830 y_dataFromTBEToRequestor;
831 l_popForwardedRequestQueue;
// Directory acknowledged the PUTX; transaction complete.
834 transition(MI, Dir_WB_ack, I) {
836 l_popForwardedRequestQueue;