2 * Copyright (c) 2013 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// SLICC controller definition: the private L0 (closest-to-core) cache of
// gem5's MESI three-level Ruby protocol.  The controller sits between the
// core's Sequencer (via mandatoryQueue) and this core's private L1 (via the
// two point-to-point message buffers below).
// NOTE(review): this chunk is an elided view of the .sm file -- interior
// lines and closing braces are missing, and the leading integer on each
// line is the original file's line number fused into the text.
29 machine(MachineType:L0Cache, "MESI Directory L0 Cache")
// CPU-side interface object: delivers read/write/ifetch callbacks and
// eviction notifications back to the core.
30 : Sequencer * sequencer;
// Enqueue latencies (in cycles) charged on messages sent toward the L1.
33 Cycles request_latency := 2;
34 Cycles response_latency := 2;
37 // From this node's L0 cache to the network
38 MessageBuffer * bufferToL1, network="To";
40 // To this node's L0 cache FROM the network
41 MessageBuffer * bufferFromL1, network="From";
43 // Message queue between this controller and the processor
44 MessageBuffer * mandatoryQueue;
// Stable states (I/S/E/M) plus transient states (Inst_IS/IS/IM/SM) for the
// L0 controller.  Each state carries the AccessPermission the Sequencer
// uses to decide whether a functional/demand access may be satisfied.
47 state_declaration(State, desc="Cache states", default="L0Cache_State_I") {
50 // The cache entry has not been allocated.
51 I, AccessPermission:Invalid;
53 // The cache entry is in shared mode. The processor can read this entry
54 // but it cannot write to it.
55 S, AccessPermission:Read_Only;
57 // The cache entry is in exclusive mode. The processor can read this
58 // entry. It can write to this entry without informing the directory.
59 // On writing, the entry moves to M state.
60 E, AccessPermission:Read_Only;
62 // The processor has read and write permissions on this entry.
63 M, AccessPermission:Read_Write;
67 // The cache controller has requested an instruction. It will be stored
68 // in the shared state so that the processor can read it.
69 Inst_IS, AccessPermission:Busy;
71 // The cache controller has requested that this entry be fetched in
72 // shared state so that the processor can read it.
73 IS, AccessPermission:Busy;
75 // The cache controller has requested that this entry be fetched in
76 // modify state so that the processor can read/write it.
77 IM, AccessPermission:Busy;
79 // The cache controller had read permission over the entry. But now the
80 // processor needs to write to it. So, the controller has requested for
// (comment truncated by sampling; presumably continues "...write
// permission", i.e. an upgrade request is in flight.  Read_Only because
// the old shared copy is still readable while the upgrade is pending.)
82 SM, AccessPermission:Read_Only;
// Events that drive the transition table: core requests (Load/Ifetch/
// Store), L1-originated invalidations and forwards, internally generated
// replacements, and data/ack responses.
86 enumeration(Event, desc="Cache events") {
88 Load, desc="Load request from the home processor";
89 Ifetch, desc="I-fetch request from the home processor";
90 Store, desc="Store request from the home processor";
92 Inv, desc="Invalidate request from L2 bank";
94 // internal generated request
95 L0_Replacement, desc="L0 Replacement", format="!r";
// Forwarded requests from other cores, relayed down through the L1.
98 Fwd_GETX, desc="GETX from other processor";
99 Fwd_GETS, desc="GETS from other processor";
100 Fwd_GET_INSTR, desc="GET_INSTR from other processor";
// Data responses: Data grants shared (S); Data_Exclusive grants E/M.
102 Data, desc="Data for processor";
103 Data_Exclusive, desc="Data for processor";
105 Ack, desc="Ack for processor";
106 Ack_all, desc="Last ack for processor";
108 WB_Ack, desc="Ack for replacement";
// Per-block cache entry: coherence state, the data block, and a dirty bit.
114 structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
115 State CacheState, desc="cache state";
116 DataBlock DataBlk, desc="data for the block";
117 bool Dirty, default="false", desc="data is dirty";
// Transaction Buffer Entry: per-address bookkeeping for an in-flight
// (transient-state) transaction, including a data buffer for writebacks
// and the count of invalidation acks still outstanding.
121 structure(TBE, desc="...") {
122 Addr addr, desc="Physical address for this TBE";
123 State TBEState, desc="Transient state";
124 DataBlock DataBlk, desc="Buffer for the data block";
125 bool Dirty, default="false", desc="data is dirty";
126 int pendingAcks, default="0", desc="number of pending acks";
// TBE table implemented in C++ ("external"); only the methods used by this
// machine are declared here.
129 structure(TBETable, external="yes") {
132 void deallocate(Addr);
133 bool isPresent(Addr);
// The controller's TBE pool, sized by the m_number_of_TBEs parameter.
136 TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
// Prototypes for helpers provided by the generated/C++ runtime.
139 Cycles ticksToCycles(Tick t);
140 void set_cache_entry(AbstractCacheEntry a);
141 void unset_cache_entry();
144 void wakeUpBuffers(Addr a);
145 void wakeUpAllBuffers(Addr a);
146 void profileMsgDelay(int virtualNetworkType, Cycles c);
148 // inclusive cache returns L0 entries only
// Look up the block in the D-cache first, then the I-cache (a block is
// asserted elsewhere to live in at most one of the two).
149 Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
150 Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
151 if(is_valid(Dcache_entry)) {
155 Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
// D-cache-only lookup (used on the data-access path).
159 Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
160 Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
// I-cache-only lookup (used on the instruction-fetch path).
164 Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
165 Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
// Return the current state for addr: the TBE's transient state takes
// precedence over the cache entry's stable state (TBE branch elided in
// this view); defaults to I when neither exists.
169 State getState(TBE tbe, Entry cache_entry, Addr addr) {
// A block must never be resident in both the I- and D-cache at once.
170 assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);
174 } else if (is_valid(cache_entry)) {
175 return cache_entry.CacheState;
// Record the new state on the TBE (if any) and the cache entry (if any);
// keeping both in sync is required for getState/getAccessPermission.
180 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
181 assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);
185 tbe.TBEState := state;
188 if (is_valid(cache_entry)) {
189 cache_entry.CacheState := state;
// Map the block's current (transient-first) state to an AccessPermission
// for the Sequencer / functional-access machinery.  Order: TBE state,
// then cache-entry state, then NotPresent.
193 AccessPermission getAccessPermission(Addr addr) {
194 TBE tbe := TBEs[addr];
196 DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
197 return L0Cache_State_to_permission(tbe.TBEState);
200 Entry cache_entry := getCacheEntry(addr);
201 if(is_valid(cache_entry)) {
202 DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(cache_entry.CacheState));
203 return L0Cache_State_to_permission(cache_entry.CacheState);
206 DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
207 return AccessPermission:NotPresent;
// Functional (debug/backdoor) read: prefer the TBE's buffered data over
// the cache entry's, mirroring getAccessPermission's TBE-first order.
210 void functionalRead(Addr addr, Packet *pkt) {
211 TBE tbe := TBEs[addr];
213 testAndRead(addr, tbe.DataBlk, pkt);
215 testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
// Functional write: returns how many block copies were updated so the
// caller can verify the write reached at least one location.
219 int functionalWrite(Addr addr, Packet *pkt) {
220 int num_functional_writes := 0;
222 TBE tbe := TBEs[addr];
// If a TBE exists, only its buffered copy is written (early return).
224 num_functional_writes := num_functional_writes +
225 testAndWrite(addr, tbe.DataBlk, pkt);
226 return num_functional_writes;
229 num_functional_writes := num_functional_writes +
230 testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
231 return num_functional_writes;
// Push the permission implied by `state` down into the cache entry.
234 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
235 if (is_valid(cache_entry)) {
236 cache_entry.changePermission(L0Cache_State_to_permission(state));
// Translate a core request type into the triggering Event.
// LD -> Load, IFETCH -> Ifetch, ST/ATOMIC -> Store (return lines elided
// in this view); anything else is a fatal protocol error.
240 Event mandatory_request_type_to_event(RubyRequestType type) {
241 if (type == RubyRequestType:LD) {
243 } else if (type == RubyRequestType:IFETCH) {
245 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
248 error("Invalid RubyRequestType");
// Number of invalidation acks still outstanding for this transaction.
252 int getPendingAcks(TBE tbe) {
253 return tbe.pendingAcks;
// Single outbound port: all requests and responses to the L1 travel as
// CoherenceMsg over bufferToL1.
256 out_port(requestNetwork_out, CoherenceMsg, bufferToL1);
258 // Messages for this L0 cache from the L1 cache
// rank=1: serviced ahead of the mandatory queue (rank 0) -- network
// messages must make progress before new core requests are admitted.
// NOTE(review): "messgeBuffer_in" is a long-standing identifier typo;
// it cannot be renamed here without touching every use site.
259 in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
260 if (messgeBuffer_in.isReady(clockEdge())) {
261 peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
// Point-to-point link: every message must be addressed to this L0.
262 assert(in_msg.Dest == machineID);
264 Entry cache_entry := getCacheEntry(in_msg.addr);
265 TBE tbe := TBEs[in_msg.addr];
// Demultiplex the message class into the corresponding Event.
267 if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
268 trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
269 } else if(in_msg.Class == CoherenceClass:DATA) {
270 trigger(Event:Data, in_msg.addr, cache_entry, tbe);
271 } else if (in_msg.Class == CoherenceClass:ACK) {
272 trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
273 } else if (in_msg.Class == CoherenceClass:WB_ACK) {
274 trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
275 } else if (in_msg.Class == CoherenceClass:INV) {
276 trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
277 } else if (in_msg.Class == CoherenceClass:GETX ||
278 in_msg.Class == CoherenceClass:UPGRADE) {
279 // upgrade transforms to GETX due to race
280 trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
281 } else if (in_msg.Class == CoherenceClass:GETS) {
282 trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
283 } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
284 trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
286 error("Invalid forwarded request type");
292 // Mandatory Queue betweens Node's CPU and it's L0 caches
// rank=0: lowest priority, so in-flight network traffic is drained first.
// For each core request: hit in the matching cache triggers the demand
// event; a copy in the WRONG cache (I vs D) triggers L0_Replacement on it
// first; otherwise allocate if space is available, else evict the victim
// chosen by cacheProbe().
293 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
294 if (mandatoryQueue_in.isReady(clockEdge())) {
295 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
297 // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
299 if (in_msg.Type == RubyRequestType:IFETCH) {
300 // ** INSTRUCTION ACCESS ***
302 Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
303 if (is_valid(Icache_entry)) {
304 // The tag matches for the L0, so the L0 asks the L2 for it.
305 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
306 Icache_entry, TBEs[in_msg.LineAddress]);
309 // Check to see if it is in the OTHER L0
310 Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
311 if (is_valid(Dcache_entry)) {
312 // The block is in the wrong L0, put the request on the queue to the shared L2
313 trigger(Event:L0_Replacement, in_msg.LineAddress,
314 Dcache_entry, TBEs[in_msg.LineAddress]);
317 if (Icache.cacheAvail(in_msg.LineAddress)) {
318 // L0 doesn't have the line, but we have space for it
319 // in the L0 so let's see if the L2 has it
320 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
321 Icache_entry, TBEs[in_msg.LineAddress]);
323 // No room in the L0, so we need to make room in the L0
// Evict the I-cache victim block (cacheProbe picks the replacement
// candidate) before the fetch can be admitted.
324 trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
325 getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
326 TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
331 // *** DATA ACCESS ***
332 Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
333 if (is_valid(Dcache_entry)) {
334 // The tag matches for the L0, so the L0 ask the L1 for it
335 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
336 Dcache_entry, TBEs[in_msg.LineAddress]);
339 // Check to see if it is in the OTHER L0
340 Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
341 if (is_valid(Icache_entry)) {
342 // The block is in the wrong L0, put the request on the queue to the private L1
343 trigger(Event:L0_Replacement, in_msg.LineAddress,
344 Icache_entry, TBEs[in_msg.LineAddress]);
347 if (Dcache.cacheAvail(in_msg.LineAddress)) {
348 // L0 doesn't have the line, but we have space for it
349 // in the L0 let's see if the L1 has it
350 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
351 Dcache_entry, TBEs[in_msg.LineAddress]);
353 // No room in the L0, so we need to make room in the L0
// Evict the D-cache victim chosen by cacheProbe().
354 trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
355 getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
356 TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
// Request-issuing actions.  All three build a CoherenceMsg addressed to
// this core's private L1 (createMachineID with the same version number)
// and enqueue it with request_latency.
// GETS: request a readable (shared) copy.
365 action(a_issueGETS, "a", desc="Issue GETS") {
366 peek(mandatoryQueue_in, RubyRequest) {
367 enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
368 out_msg.addr := address;
369 out_msg.Class := CoherenceClass:GETS;
370 out_msg.Sender := machineID;
371 out_msg.Dest := createMachineID(MachineType:L1Cache, version);
372 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
373 address, out_msg.Dest);
374 out_msg.MessageSize := MessageSizeType:Control;
375 out_msg.AccessMode := in_msg.AccessMode;
// GETX: request an exclusive (writable) copy.
380 action(b_issueGETX, "b", desc="Issue GETX") {
381 peek(mandatoryQueue_in, RubyRequest) {
382 enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
383 out_msg.addr := address;
384 out_msg.Class := CoherenceClass:GETX;
385 out_msg.Sender := machineID;
386 DPRINTF(RubySlicc, "%s\n", machineID);
387 out_msg.Dest := createMachineID(MachineType:L1Cache, version);
389 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
390 address, out_msg.Dest);
391 out_msg.MessageSize := MessageSizeType:Control;
392 out_msg.AccessMode := in_msg.AccessMode;
// UPGRADE: S -> M without re-fetching data (we already hold the block).
// NOTE(review): desc says "Issue GETX" -- presumably copy-paste; the
// message class is UPGRADE.  desc is a runtime string, left unchanged.
397 action(c_issueUPGRADE, "c", desc="Issue GETX") {
398 peek(mandatoryQueue_in, RubyRequest) {
399 enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
400 out_msg.addr := address;
401 out_msg.Class := CoherenceClass:UPGRADE;
402 out_msg.Sender := machineID;
403 out_msg.Dest := createMachineID(MachineType:L1Cache, version);
405 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
406 address, out_msg.Dest);
407 out_msg.MessageSize := MessageSizeType:Control;
408 out_msg.AccessMode := in_msg.AccessMode;
// Respond to an invalidation by sending the (possibly dirty) data back to
// the L1, then clearing the local dirty bit.
// NOTE(review): desc says "L2" but the Dest is this core's L1 -- the desc
// string is inaccurate; it is a runtime string, left unchanged.
413 action(f_sendDataToL1, "f", desc="send data to the L2 cache") {
414 enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
415 assert(is_valid(cache_entry));
416 out_msg.addr := address;
417 out_msg.Class := CoherenceClass:INV_DATA;
418 out_msg.DataBlk := cache_entry.DataBlk;
419 out_msg.Dirty := cache_entry.Dirty;
420 out_msg.Sender := machineID;
421 out_msg.Dest := createMachineID(MachineType:L1Cache, version);
422 out_msg.MessageSize := MessageSizeType:Writeback_Data;
// Data has been handed back; our copy is no longer dirty.
424 cache_entry.Dirty := false;
// Acknowledge an invalidation without data (block was clean).  Same
// misleading "L2" desc note as above applies.
427 action(fi_sendInvAck, "fi", desc="send data to the L2 cache") {
428 peek(messgeBuffer_in, CoherenceMsg) {
429 enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
430 out_msg.addr := address;
431 out_msg.Class := CoherenceClass:INV_ACK;
432 out_msg.Sender := machineID;
433 out_msg.Dest := createMachineID(MachineType:L1Cache, version);
434 out_msg.MessageSize := MessageSizeType:Response_Control;
// Tell the core it lost the block, so LL/SC reservations and similar
// core-side state can be squashed.  Gated by the send_evictions parameter.
439 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
440 if (send_evictions) {
441 DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
442 sequencer.evictionCallback(address);
// Voluntary writeback (replacement) to the L1.  Data is attached only if
// the block is dirty; otherwise a control-sized PUTX suffices.
// NOTE(review): desc says "L2" but Dest is the L1; runtime string left
// unchanged.
446 action(g_issuePUTX, "g", desc="send data to the L2 cache") {
447 enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
448 assert(is_valid(cache_entry));
449 out_msg.addr := address;
450 out_msg.Class := CoherenceClass:PUTX;
451 out_msg.Dirty := cache_entry.Dirty;
452 out_msg.Sender:= machineID;
453 out_msg.Dest := createMachineID(MachineType:L1Cache, version);
455 if (cache_entry.Dirty) {
456 out_msg.MessageSize := MessageSizeType:Writeback_Data;
457 out_msg.DataBlk := cache_entry.DataBlk;
459 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Sequencer callback actions.  The plain "h*" variants fire on cache hits;
// the "hx*"/"hhx" variants pass true as the extra readCallback/
// writeCallback argument and are used when the access completes off a
// network response (miss completion) rather than a direct hit.  Each one
// touches the appropriate cache's replacement state via setMRU first.
464 action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
465 assert(is_valid(cache_entry));
466 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
467 Dcache.setMRU(cache_entry);
468 sequencer.readCallback(address, cache_entry.DataBlk);
471 action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
472 assert(is_valid(cache_entry));
473 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
474 Icache.setMRU(cache_entry);
475 sequencer.readCallback(address, cache_entry.DataBlk);
478 action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
479 assert(is_valid(cache_entry));
480 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
481 Dcache.setMRU(cache_entry);
482 sequencer.readCallback(address, cache_entry.DataBlk, true);
485 action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
486 assert(is_valid(cache_entry));
487 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
488 Icache.setMRU(cache_entry);
489 sequencer.readCallback(address, cache_entry.DataBlk, true);
// Store callbacks additionally mark the block dirty after the sequencer
// has written its data into DataBlk.
492 action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
493 assert(is_valid(cache_entry));
494 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
495 Dcache.setMRU(cache_entry);
496 sequencer.writeCallback(address, cache_entry.DataBlk);
497 cache_entry.Dirty := true;
500 action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
501 assert(is_valid(cache_entry));
502 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
503 Dcache.setMRU(cache_entry);
504 sequencer.writeCallback(address, cache_entry.DataBlk, true);
505 cache_entry.Dirty := true;
// Allocate a TBE for this address and snapshot the entry's data/dirty
// state so it survives the cache entry being invalidated mid-transaction.
508 action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
509 check_allocate(TBEs);
510 assert(is_valid(cache_entry));
511 TBEs.allocate(address);
512 set_tbe(TBEs[address]);
513 tbe.Dirty := cache_entry.Dirty;
514 tbe.DataBlk := cache_entry.DataBlk;
517 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
518 mandatoryQueue_in.dequeue(clockEdge());
// Both pop actions drain the same L1-facing buffer; they differ only in
// which virtual network (2 = request, 1 = response) the latency is
// profiled against.
521 action(l_popRequestQueue, "l",
522 desc="Pop incoming request queue and profile the delay within this virtual network") {
523 Tick delay := messgeBuffer_in.dequeue(clockEdge());
524 profileMsgDelay(2, ticksToCycles(delay));
527 action(o_popIncomingResponseQueue, "o",
528 desc="Pop Incoming Response queue and profile the delay within this virtual network") {
529 Tick delay := messgeBuffer_in.dequeue(clockEdge());
530 profileMsgDelay(1, ticksToCycles(delay));
533 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
534 TBEs.deallocate(address);
// Copy the payload of the incoming message into the (already allocated)
// cache entry; separate D-cache and I-cache variants.
538 action(u_writeDataToCache, "u", desc="Write data to cache") {
539 peek(messgeBuffer_in, CoherenceMsg) {
540 assert(is_valid(cache_entry));
541 cache_entry.DataBlk := in_msg.DataBlk;
545 action(u_writeInstToCache, "ui", desc="Write data to cache") {
546 peek(messgeBuffer_in, CoherenceMsg) {
547 assert(is_valid(cache_entry));
548 cache_entry.DataBlk := in_msg.DataBlk;
// Free the block's frame in whichever cache currently holds the tag.
552 action(ff_deallocateCacheBlock, "\f",
553 desc="Deallocate L1 cache block.") {
554 if (Dcache.isTagPresent(address)) {
555 Dcache.deallocate(address);
557 Icache.deallocate(address);
// Allocation actions are idempotent: they only allocate when no entry is
// already bound for this transition.
562 action(oo_allocateDCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
563 if (is_invalid(cache_entry)) {
564 set_cache_entry(Dcache.allocate(address, new Entry));
568 action(pp_allocateICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
569 if (is_invalid(cache_entry)) {
570 set_cache_entry(Icache.allocate(address, new Entry));
// Park the head mandatory request until kd_wakeUpDependents releases
// requests stalled on this address (avoids busy-recycling the queue).
574 action(z_stallAndWaitMandatoryQueue, "\z", desc="recycle cpu request queue") {
575 stall_and_wait(mandatoryQueue_in, address);
578 action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
579 wakeUpAllBuffers(address);
// Demand hit/miss statistics for each cache.
582 action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
583 ++Icache.demand_misses;
586 action(uu_profileInstHit, "\uih", desc="Profile the demand miss") {
587 ++Icache.demand_hits;
590 action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
591 ++Dcache.demand_misses;
594 action(uu_profileDataHit, "\udh", desc="Profile the demand miss") {
595 ++Dcache.demand_hits;
598 //*****************************************************
// Transition table: (current state, event) -> actions [+ next state].
// Action bodies inside each transition are partially elided in this view;
// the table continues past the end of this chunk.
600 //*****************************************************
602 // Transitions for Load/Store/Replacement/WriteBack from transient states
// While a transaction is in flight, new core requests for the same block
// are parked rather than processed out of order.
603 transition({Inst_IS, IS, IM, SM}, {Load, Ifetch, Store, L0_Replacement}) {
604 z_stallAndWaitMandatoryQueue;
607 // Transitions from Idle
608 transition(I, Load, IS) {
609 oo_allocateDCacheBlock;
616 transition(I, Ifetch, Inst_IS) {
617 pp_allocateICacheBlock;
624 transition(I, Store, IM) {
625 oo_allocateDCacheBlock;
// Invalidations racing with our own requests: nothing to hand back yet.
632 transition({I, IS, IM, Inst_IS}, Inv) {
// Upgrade in flight lost the race: fall back to a full GETX path.
637 transition(SM, Inv, IM) {
642 // Transitions from Shared
643 transition({S,E,M}, Load) {
649 transition({S,E,M}, Ifetch) {
655 transition(S, Store, SM) {
662 transition(S, L0_Replacement, I) {
663 forward_eviction_to_cpu;
664 ff_deallocateCacheBlock;
667 transition(S, Inv, I) {
668 forward_eviction_to_cpu;
670 ff_deallocateCacheBlock;
674 // Transitions from Exclusive
// Silent E -> M upgrade: no message to the L1 is needed.
675 transition({E,M}, Store, M) {
681 transition(E, L0_Replacement, I) {
682 forward_eviction_to_cpu;
684 ff_deallocateCacheBlock;
687 transition(E, {Inv, Fwd_GETX}, I) {
689 forward_eviction_to_cpu;
691 ff_deallocateCacheBlock;
695 transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
700 // Transitions from Modified
701 transition(M, L0_Replacement, I) {
702 forward_eviction_to_cpu;
704 ff_deallocateCacheBlock;
707 transition(M, {Inv, Fwd_GETX}, I) {
708 forward_eviction_to_cpu;
710 ff_deallocateCacheBlock;
714 transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
// Miss completions: Data grants S, Data_Exclusive grants E (reads) or M
// (writes/upgrades).
719 transition(IS, Data, S) {
723 o_popIncomingResponseQueue;
727 transition(IS, Data_Exclusive, E) {
731 o_popIncomingResponseQueue;
735 transition(Inst_IS, Data, S) {
739 o_popIncomingResponseQueue;
743 transition(Inst_IS, Data_Exclusive, E) {
747 o_popIncomingResponseQueue;
751 transition({IM,SM}, Data_Exclusive, M) {
755 o_popIncomingResponseQueue;