3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// L1 cache controller for a broadcast-snooping MOSI protocol.
// Two virtual networks are used: the ordered address network (vnet 0)
// carries broadcast requests and establishes the global order that
// serializes the protocol; the unordered data network (vnet 1) carries
// data responses.
34 machine(L1Cache, "MOSI Broadcast Optimized") {
// Outgoing ports ("To" the network).
36 MessageBuffer addressFromCache, network="To", virtual_network="0", ordered="true";
37 MessageBuffer dataFromCache, network="To", virtual_network="1", ordered="false";
// Incoming ports ("From" the network).
39 MessageBuffer addressToCache, network="From", virtual_network="0", ordered="true";
40 MessageBuffer dataToCache, network="From", virtual_network="1", ordered="false";
// Cache-line states: stable MOSI states plus transient states.  The
// superscript in the shorthand name encodes what is still awaited:
// ^A = own request not yet observed on the address network,
// ^D = data not yet received, ^AD = both.  Trailing _O/_I/_OI record
// other requests observed in the meantime that will demote the final
// state (to Owned / Invalid).
// NOTE(review): stable states I, S and O are referenced by setState()
// and the transitions below, but their declarations (original lines
// 46-48) are not visible in this view of the file.
44 enumeration(State, desc="Cache states", default="L1Cache_State_I") {
45 NP, desc="Not Present";
49 M, desc="Modified", format="!b";
// Waiting for both own request (address network) and data.
50 IS_AD, "IS^AD", desc="idle, issued GETS, have not seen GETS or data yet";
51 IM_AD, "IM^AD", desc="idle, issued GETX, have not seen GETX or data yet";
52 SM_AD, "SM^AD",desc="shared, issued GETX, have not seen GETX or data yet";
53 OM_A, "OM^A",desc="owned, issued GETX, have not seen GETX yet", format="!b";
// Data already received; waiting only for own request.
55 IS_A, "IS^A",desc="idle, issued GETS, have not seen GETS, have seen data";
56 IM_A, "IM^A",desc="idle, issued GETX, have not seen GETX, have seen data";
57 SM_A, "SM^A",desc="shared, issued GETX, have not seen GETX, have seen data", format="!b";
// Write-back (PUTX) in flight; waiting to observe our own PUTX.
59 MI_A, "MI^A", desc="modified, issued PUTX, have not seen PUTX yet";
60 OI_A, "OI^A", desc="owned, issued PUTX, have not seen PUTX yet";
61 II_A, "II^A", desc="modified, issued PUTX, have not seen PUTX, then saw other GETX", format="!b";
// Own request observed; waiting only for data.
63 IS_D, "IS^D", desc="idle, issued GETS, have seen GETS, have not seen data yet";
64 IS_D_I, "IS^D^I", desc="idle, issued GETS, have seen GETS, have not seen data, then saw other GETX";
65 IM_D, "IM^D", desc="idle, issued GETX, have seen GETX, have not seen data yet";
66 IM_D_O, "IM^D^O", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
67 IM_D_I, "IM^D^I", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETX";
68 IM_D_OI, "IM^D^OI", desc="idle, issued GETX, have seen GETX, have not seen data yet, then saw other GETS, then saw other GETX";
69 SM_D, "SM^D", desc="shared, issued GETX, have seen GETX, have not seen data yet";
70 SM_D_O, "SM^D^O", desc="shared, issued GETX, have seen GETX, have not seen data yet, then saw other GETS";
// Events that drive the state machine.  They come from three sources:
// the processor (mandatory/optional queues), the ordered address
// network (our own and other processors' snooped requests), and the
// data network.
75 enumeration(Event, desc="Cache events") {
// From the processor (mandatory queue) and the prefetcher (optional queue).
77 Load, desc="Load request from the processor";
78 Ifetch, desc="I-fetch request from the processor";
79 Store, desc="Store request from the processor";
80 Replacement, desc="Replacement";
81 Load_prefetch, desc="Read only prefetch";
82 Store_prefetch, desc="Read write prefetch", format="!r";
84 // From Address network
85 Own_GETS, desc="Occurs when we observe our own GETS request in the global order";
86 Own_GET_INSTR, desc="Occurs when we observe our own GETInstr request in the global order";
87 Own_GETX, desc="Occurs when we observe our own GETX request in the global order";
88 Own_PUTX, desc="Occurs when we observe our own PUTX request in the global order", format="!r";
89 Other_GETS, desc="Occurs when we observe a GETS request from another processor";
90 Other_GET_INSTR, desc="Occurs when we observe a GETInstr request from another processor";
91 Other_GETX, desc="Occurs when we observe a GETX request from another processor";
92 Other_PUTX, desc="Occurs when we observe a PUTX request from another processor", format="!r";
// From the data network.
95 Data, desc="Data for this block from the data network";
// Per-line entry stored in CacheMemory: the stable coherence state
// plus the data block itself.
101 structure(Entry, desc="...", interface="AbstractCacheEntry") {
102 State CacheState, desc="cache state";
103 DataBlock DataBlk, desc="data for the block";
// Transaction Buffer Entry: per-address bookkeeping for an in-flight
// transaction.  Besides the transient state and a data buffer, it
// records the set of other requestors (ForwardIDs) whose snooped
// requests we must answer with data once our own data arrives
// (see actions e_recordForwardingInfo / o_cacheToForward).
107 structure(TBE, desc="...") {
108 Address Address, desc="Physical address for this TBE";
109 State TBEState, desc="Transient state";
110 DataBlock DataBlk, desc="Buffer for the data block";
111 NetDest ForwardIDs, desc="IDs of the processors to forward the block";
112 Address ForwardAddress, desc="Address of request for forwarding";
113 bool isPrefetch, desc="Set if this request is a prefetch";
// Interface to the C++-implemented cache array.  cacheAvail() reports
// whether a way is free for the address; cacheProbe() names the victim
// line that would have to be evicted to make room.
116 external_type(CacheMemory) {
117 bool cacheAvail(Address);
118 Address cacheProbe(Address);
119 void allocate(Address);
120 void deallocate(Address);
121 Entry lookup(Address);
122 void changePermission(Address, AccessPermission);
123 bool isTagPresent(Address);
// Interface to the C++-implemented TBE table (one entry per
// outstanding address).
126 external_type(TBETable) {
128 void allocate(Address);
129 void deallocate(Address);
130 bool isPresent(Address);
// Processor-side queues and chip-provided structures.
// mandatoryQueue carries demand requests; optionalQueue carries
// prefetches (see the two in_ports below).
133 MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
134 MessageBuffer optionalQueue, ordered="true", abstract_chip_ptr="true";
135 Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
136 StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
// Controller-local state: the TBE table and the cache array.
139 TBETable TBEs, template_hack="<L1Cache_TBE>";
140 CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified"', abstract_chip_ptr="true";
// Helper used to ship our current state inside outgoing requests
// (out_msg.CacheState in the issue actions below).
142 int cache_state_to_int(State state);
// Returns the current state of the block at addr.  A TBE (transient)
// state takes priority over the cache entry's stable state.
// NOTE(review): the final fallthrough (original lines 149-151,
// presumably returning State:NP when neither is present) is not
// visible in this view.
144 State getState(Address addr) {
145 if(TBEs.isPresent(addr)) {
146 return TBEs[addr].TBEState;
147 } else if (cacheMemory.isTagPresent(addr)) {
148 return cacheMemory[addr].CacheState;
// Writes the new state into the TBE (if present) and the cache entry
// (if present), then derives the entry's access permission from the
// state: invalid-ish states lose all access, S/O are read-only, M is
// read-write, and everything else (the transient states) is Busy.
153 void setState(Address addr, State state) {
154 if (TBEs.isPresent(addr)) {
155 TBEs[addr].TBEState := state;
157 if (cacheMemory.isTagPresent(addr)) {
158 cacheMemory[addr].CacheState := state;
// MI_A/II_A are treated as already-invalid: the PUTX has been issued
// and the line may no longer be read or written.
161 if ((state == State:I) || (state == State:MI_A) || (state == State:II_A)) {
162 cacheMemory.changePermission(addr, AccessPermission:Invalid);
163 } else if (state == State:S || state == State:O) {
164 cacheMemory.changePermission(addr, AccessPermission:Read_Only);
165 } else if (state == State:M) {
166 cacheMemory.changePermission(addr, AccessPermission:Read_Write);
// All remaining (transient) states block processor access.
168 cacheMemory.changePermission(addr, AccessPermission:Busy);
// Output ports onto the two virtual networks.
175 out_port(dataNetwork_out, DataMsg, dataFromCache);
176 out_port(addressNetwork_out, AddressMsg, addressFromCache);
// Data network input: every arriving data message simply triggers the
// Data event for the block it names.
181 in_port(dataNetwork_in, DataMsg, dataToCache) {
182 if (dataNetwork_in.isReady()) {
183 peek(dataNetwork_in, DataMsg) {
184 trigger(Event:Data, in_msg.Address);
// Address network input (snoop port).  Every broadcast request is
// classified as Own_* or Other_* by comparing the requestor to our
// machineID; because this network is ordered, the point at which we
// observe our own request defines its place in the global order.
190 in_port(addressNetwork_in, AddressMsg, addressToCache) {
191 if (addressNetwork_in.isReady()) {
192 peek(addressNetwork_in, AddressMsg) {
193 if (in_msg.Type == CoherenceRequestType:GETS) {
194 if (in_msg.Requestor == machineID) {
195 trigger(Event:Own_GETS, in_msg.Address);
197 trigger(Event:Other_GETS, in_msg.Address);
199 } else if (in_msg.Type == CoherenceRequestType:GETX) {
200 if (in_msg.Requestor == machineID) {
201 trigger(Event:Own_GETX, in_msg.Address);
203 trigger(Event:Other_GETX, in_msg.Address);
205 } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
206 if (in_msg.Requestor == machineID) {
207 trigger(Event:Own_GET_INSTR, in_msg.Address);
209 trigger(Event:Other_GET_INSTR, in_msg.Address);
211 } else if (in_msg.Type == CoherenceRequestType:PUTX) {
212 if (in_msg.Requestor == machineID) {
213 trigger(Event:Own_PUTX, in_msg.Address);
215 trigger(Event:Other_PUTX, in_msg.Address);
// Any other request type is a protocol error.
218 error("Unexpected message");
// Demand requests from the processor.  If no cache way is available
// for the requested address, trigger Replacement on the victim chosen
// by cacheProbe() instead; the original request stays at the head of
// the queue and re-triggers after the eviction frees a way.
225 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
226 if (mandatoryQueue_in.isReady()) {
227 peek(mandatoryQueue_in, CacheMsg) {
228 if (cacheMemory.cacheAvail(in_msg.Address) == false) {
229 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
231 if (in_msg.Type == CacheRequestType:LD) {
232 trigger(Event:Load, in_msg.Address);
233 } else if (in_msg.Type == CacheRequestType:IFETCH) {
234 trigger(Event:Ifetch, in_msg.Address);
// Atomics need write permission, so they are handled as stores.
235 } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
236 trigger(Event:Store, in_msg.Address);
238 error("Invalid CacheRequestType");
// Prefetch requests.  Same victim-selection logic as the mandatory
// queue, but loads and ifetches both map to Load_prefetch, and
// stores/atomics map to Store_prefetch.
246 in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
247 if (optionalQueue_in.isReady()) {
248 peek(optionalQueue_in, CacheMsg) {
249 if (cacheMemory.cacheAvail(in_msg.Address) == false) {
250 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
252 if ((in_msg.Type == CacheRequestType:LD) || (in_msg.Type == CacheRequestType:IFETCH)) {
253 trigger(Event:Load_prefetch, in_msg.Address);
254 } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
255 trigger(Event:Store_prefetch, in_msg.Address);
257 error("Invalid CacheRequestType");
// --- TBE and cache-block management actions ---
// Allocate a TBE for this address.  check_allocate stalls (via resource
// check) if the table is full.  The TBE's state is seeded from the
// cache entry, if any, so getState() stays consistent once the TBE
// starts taking priority over the cache entry.
265 action(a_allocateTBE, "a", desc="Allocate TBE with Address=B, ForwardID=null, RetryCount=zero, ForwardIDRetryCount=zero, ForwardProgressBit=unset.") {
266 check_allocate(TBEs);
267 TBEs.allocate(address);
268 TBEs[address].isPrefetch := false;
269 TBEs[address].ForwardIDs.clear();
271 // Keep the TBE state consistent with the cache state
272 if (cacheMemory.isTagPresent(address)) {
273 TBEs[address].TBEState := cacheMemory[address].CacheState;
// Mark the outstanding transaction as a prefetch so the hit actions
// skip the sequencer callback.
278 action(b_setPrefetchBit, "b", desc="Set prefetch bit in TBE.") {
279 TBEs[address].isPrefetch := true;
// Allocate a cache entry if one is not already present.
282 action(c_allocateCacheBlock, "c", desc="Set cache tag equal to tag of block B.") {
283 if (cacheMemory.isTagPresent(address) == false) {
284 cacheMemory.allocate(address);
288 action(d_deallocateTBE, "d", desc="Deallocate TBE.") {
289 TBEs.deallocate(address);
// Remember a snooped requestor so we can forward the data block to it
// later (consumed by o_cacheToForward).
292 action(e_recordForwardingInfo, "e", desc="Record ID of other processor in ForwardID.") {
293 peek(addressNetwork_in, AddressMsg){
294 TBEs[address].ForwardIDs.add(in_msg.Requestor);
295 TBEs[address].ForwardAddress := in_msg.Address;
// --- Request issue and processor-callback actions ---
// Broadcast a GETS to all L1 caches plus the directory/memory.  The
// current cache state is piggy-backed on the request (CacheState).
299 action(f_issueGETS, "f", desc="Issue GETS.") {
300 enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
301 out_msg.Address := address;
302 out_msg.Type := CoherenceRequestType:GETS;
303 out_msg.CacheState := cache_state_to_int(getState(address));
304 out_msg.Requestor := machineID;
305 out_msg.Destination.broadcast(MachineType:L1Cache);
306 out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
307 out_msg.MessageSize := MessageSizeType:Control;
// Broadcast a GETX (request for exclusive/write permission).
311 action(g_issueGETX, "g", desc="Issue GETX.") {
312 enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
313 out_msg.Address := address;
314 out_msg.Type := CoherenceRequestType:GETX;
315 out_msg.CacheState := cache_state_to_int(getState(address));
316 out_msg.Requestor := machineID;
317 out_msg.Destination.broadcast(MachineType:L1Cache);
318 out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
319 out_msg.MessageSize := MessageSizeType:Control;
// Complete a load at the sequencer — unless the transaction is a
// prefetch (no TBE, or TBE with isPrefetch clear, means a real demand
// access).
323 action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
324 DEBUG_EXPR(cacheMemory[address].DataBlk);
325 if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
327 sequencer.readCallback(address, cacheMemory[address].DataBlk);
329 // Prefetch - don't call back
// Complete a store at the sequencer, with the same prefetch check.
333 action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
334 DEBUG_EXPR(cacheMemory[address].DataBlk);
335 if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
337 sequencer.writeCallback(address, cacheMemory[address].DataBlk);
339 // Prefetch - don't call back
// --- Queue-pop actions (one per input port) ---
343 action(i_popAddressQueue, "i", desc="Pop incoming address queue.") {
344 addressNetwork_in.dequeue();
347 action(j_popDataQueue, "j", desc="Pop incoming data queue.") {
348 dataNetwork_in.dequeue();
351 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
352 mandatoryQueue_in.dequeue();
355 action(l_popOptionalQueue, "l", desc="Pop optional queue.") {
356 optionalQueue_in.dequeue();
// Forward the just-filled cache block to every requestor recorded in
// the TBE's ForwardIDs (collected by e_recordForwardingInfo while our
// own fill was pending).
360 action(o_cacheToForward, "o", desc="Send data from the cache to the processor indicated by ForwardIDs.") {
361 peek(dataNetwork_in, DataMsg){
362 // This has a CACHE_RESPONSE_LATENCY latency because we want to avoid the
363 // timing strangeness that can occur if requests that source the
364 // data from the TBE are faster than data sourced from the cache
365 enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY"){
366 out_msg.Address := TBEs[address].ForwardAddress;
367 out_msg.Sender := machineID;
368 out_msg.DataBlk := cacheMemory[address].DataBlk;
369 out_msg.Destination := TBEs[address].ForwardIDs;
370 out_msg.DestMachine := MachineType:L1Cache;
371 out_msg.MessageSize := MessageSizeType:Data;
// Issue a PUTX (write-back) carrying the dirty data.  It is sent to
// the directory and also back to ourselves so that observing our own
// PUTX on the ordered address network (Own_PUTX) completes the
// eviction in the global order.
376 action(p_issuePUTX, "p", desc="Issue PUTX.") {
377 enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
378 out_msg.Address := address;
379 out_msg.Type := CoherenceRequestType:PUTX;
380 out_msg.CacheState := cache_state_to_int(getState(address));
381 out_msg.Requestor := machineID;
382 out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
383 out_msg.Destination.add(machineID); // Back to us
384 out_msg.DataBlk := cacheMemory[address].DataBlk;
385 out_msg.MessageSize := MessageSizeType:Data;
// Snapshot the cache data into the TBE (used before deallocating the
// cache block during replacement).
389 action(q_writeDataFromCacheToTBE, "q", desc="Write data from the cache into the TBE.") {
390 TBEs[address].DataBlk := cacheMemory[address].DataBlk;
391 DEBUG_EXPR(TBEs[address].DataBlk);
// Respond to a snooped request with data sourced from the cache entry.
394 action(r_cacheToRequestor, "r", desc="Send data from the cache to the requestor") {
395 peek(addressNetwork_in, AddressMsg) {
396 enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") {
397 out_msg.Address := address;
398 out_msg.Sender := machineID;
399 out_msg.Destination.add(in_msg.Requestor);
400 out_msg.DestMachine := MachineType:L1Cache;
401 out_msg.DataBlk := cacheMemory[address].DataBlk;
402 out_msg.MessageSize := MessageSizeType:Data;
404 DEBUG_EXPR(cacheMemory[address].DataBlk);
// Buffer incoming fill data in the TBE until our own request has been
// observed and the data can be written into the cache.
409 action(s_saveDataInTBE, "s", desc="Save data in data field of TBE.") {
410 peek(dataNetwork_in, DataMsg) {
411 TBEs[address].DataBlk := in_msg.DataBlk;
412 DEBUG_EXPR(TBEs[address].DataBlk);
// Broadcast a GET_INSTR (instruction-fetch read; same shape as GETS).
416 action(t_issueGET_INSTR, "t", desc="Issue GETInstr.") {
417 enqueue(addressNetwork_out, AddressMsg, latency="ISSUE_LATENCY") {
418 out_msg.Address := address;
419 out_msg.Type := CoherenceRequestType:GET_INSTR;
420 out_msg.CacheState := cache_state_to_int(getState(address));
421 out_msg.Requestor := machineID;
422 out_msg.Destination.broadcast(MachineType:L1Cache);
423 out_msg.Destination.add(map_Address_to_Directory(address)); // To memory
424 out_msg.MessageSize := MessageSizeType:Control;
// Move buffered fill data from the TBE into the cache entry.
428 action(w_writeDataFromTBEToCache, "w", desc="Write data from the TBE into the cache.") {
429 cacheMemory[address].DataBlk := TBEs[address].DataBlk;
430 DEBUG_EXPR(cacheMemory[address].DataBlk);
// Respond to a snooped request with data sourced from the TBE (used
// while the line is mid-eviction and no longer in the cache).
433 action(y_tbeToReq, "y", desc="Send data from the TBE to the requestor.") {
434 peek(addressNetwork_in, AddressMsg) {
435 enqueue(dataNetwork_out, DataMsg, latency="CACHE_RESPONSE_LATENCY") { // Either this or the PutX should have a real latency
436 out_msg.Address := address;
437 out_msg.Sender := machineID;
438 out_msg.Destination.add(in_msg.Requestor);
439 out_msg.DestMachine := MachineType:L1Cache;
440 out_msg.DataBlk := TBEs[address].DataBlk;
441 out_msg.MessageSize := MessageSizeType:Data;
446 action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
447 cacheMemory.deallocate(address);
// "z" is the SLICC-recognized stall: leave the triggering message on
// its queue and retry later.
450 action(z_stall, "z", desc="Cannot be handled right now.") {
451 // Special name recognized as do nothing case
// NOTE(review): throughout the transition section, many action lines
// and closing braces (e.g. the TBE-allocate / issue / pop actions
// implied by original line-number gaps) are elided from this view of
// the file; only the visible actions are documented.
456 // Transitions from Idle
// Demand and prefetch misses from NP/I: allocate a cache entry and
// move to the appropriate A+D-waiting transient state (IS_AD for
// reads, IM_AD for writes).
457 transition({NP, I}, Load, IS_AD) {
459 c_allocateCacheBlock;
464 transition({NP, I}, Ifetch, IS_AD) {
466 c_allocateCacheBlock;
471 transition({NP, I}, Load_prefetch, IS_AD) {
473 c_allocateCacheBlock;
479 transition({NP, I}, Store, IM_AD) {
481 c_allocateCacheBlock;
486 transition({NP, I}, Store_prefetch, IM_AD) {
488 c_allocateCacheBlock;
// Evicting a clean invalid line needs no write-back.
494 transition(I, Replacement) {
495 ff_deallocateCacheBlock; // the cache line is now in NotPresent
// Snooped requests for a block we don't hold require no action.
498 transition({NP, I}, { Other_GETS, Other_GET_INSTR, Other_GETX } ) {
502 // Transitions from Shared
// Reads hit in S; a store must upgrade via GETX (SM_AD).
503 transition(S, {Load,Ifetch}) {
508 transition(S, Load_prefetch) {
512 transition(S, Store, SM_AD) {
// A store prefetch from S goes to IM_AD (not SM_AD) per the visible
// target state.
518 transition(S, Store_prefetch, IM_AD) {
521 b_setPrefetchBit; // Must be after allocate TBE
// Clean shared line: silent eviction.
525 transition(S, Replacement, I) {
526 ff_deallocateCacheBlock; // the cache line is now in NotPresent
// Other readers don't disturb S; another writer invalidates us.
529 transition(S, {Other_GETS, Other_GET_INSTR}) {
533 transition(S, Other_GETX, I) {
537 // Transitions from Owned
// Reads hit in O; a store upgrades via GETX (OM_A — data already held,
// only our own GETX need be observed).
538 transition(O, {Load,Ifetch}) {
543 transition(O, Store, OM_A){
549 transition(O, Load_prefetch) {
553 transition(O, Store_prefetch, OM_A) {
// Evicting an owned (dirty-shared) line requires a PUTX write-back:
// save the data in the TBE so it can still be supplied/written back
// after the cache entry is freed.
560 transition(O, Replacement, OI_A) {
563 q_writeDataFromCacheToTBE;// the cache line is now empty
564 ff_deallocateCacheBlock; // the cache line is now in NotPresent
// As owner we answer other readers; another writer takes the block.
567 transition(O, {Other_GETS,Other_GET_INSTR}) {
572 transition(O, Other_GETX, I) {
577 // Transitions from Modified
// All processor accesses hit in M.
578 transition(M, {Load,Ifetch}) {
583 transition(M, Store) {
588 transition(M, {Load_prefetch,Store_prefetch}) {
// Dirty eviction: buffer data in the TBE, free the entry, and wait in
// MI_A for our own PUTX to appear in the global order.
592 transition(M, Replacement, MI_A) {
595 q_writeDataFromCacheToTBE;// the cache line is now empty
596 ff_deallocateCacheBlock; // the cache line is now in NotPresent
// Another reader downgrades us to Owned (we keep supplying data);
// another writer invalidates us.
599 transition(M, {Other_GETS,Other_GET_INSTR}, O) {
604 transition(M, Other_GETX, I) {
610 // Transitions for Load/Store/Replacement from transient states
// Demand accesses and replacements stall while a transaction is in
// flight; prefetches to blocks already in flight are simply dropped
// or stalled depending on the state (visible actions elided in this
// view).
612 transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O}, {Load, Ifetch, Store, Replacement}) {
616 transition({IS_AD, IM_AD, IS_A, IM_A, SM_AD, OM_A, SM_A, IS_D, IM_D, IM_D_O, SM_D, SM_D_O}, Load_prefetch) {
620 transition({IS_D_I, IM_D_I, IM_D_OI}, Load_prefetch) {
624 transition({IM_AD, SM_AD, OM_A, IM_A, SM_A, IM_D, SM_D}, Store_prefetch) {
628 transition({IS_AD, IS_A, IS_D, IS_D_I, IM_D_O, IM_D_I, IM_D_OI, SM_D_O}, Store_prefetch) {
632 transition({MI_A, OI_A, II_A}, {Load, Ifetch, Store, Load_prefetch, Store_prefetch, Replacement}) {
636 // Always ignore PUTXs which we are not the owner of
637 transition({NP, I, S, O, M, IS_AD, IM_AD, SM_AD, OM_A, IS_A, IM_A, SM_A, MI_A, OI_A, II_A, IS_D, IS_D_I, IM_D, IM_D_O, IM_D_I, IM_D_OI, SM_D, SM_D_O }, Other_PUTX) {
641 // transitions from IS_AD
// Observing our own GETS fixes our place in the global order; now only
// data is pending (IS_D).  If data arrives first, only the address
// observation is pending (IS_A).
643 transition(IS_AD, {Own_GETS,Own_GET_INSTR}, IS_D) {
646 transition(IS_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
649 transition(IS_AD, Data, IS_A) {
655 // Transitions from IM_AD
657 transition(IM_AD, Own_GETX, IM_D) {
660 transition(IM_AD, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
663 transition(IM_AD, Data, IM_A) {
668 // Transitions from OM_A
// We already own the data, so observing our own GETX completes the
// upgrade to M.  Another GETX first means we lose the block and must
// re-fetch (IM_AD).
670 transition(OM_A, Own_GETX, M){
676 transition(OM_A, {Other_GETS, Other_GET_INSTR}){
681 transition(OM_A, Other_GETX, IM_AD){
686 transition(OM_A, Data, IM_A) { // if we get data, we know we're going to lose block before we see own GETX
691 // Transitions from SM_AD
693 transition(SM_AD, Own_GETX, SM_D) {
// Another GETX invalidates our shared copy; the upgrade becomes a
// plain fetch-for-write (IM_AD).
696 transition(SM_AD, {Other_GETS,Other_GET_INSTR}) {
699 transition(SM_AD, Other_GETX, IM_AD) {
702 transition(SM_AD, Data, SM_A) {
708 // Transitions from IS_A
// Data already buffered in the TBE: observing our own request writes
// it into the cache and reaches the stable state.
710 transition(IS_A, {Own_GETS,Own_GET_INSTR}, S) {
711 w_writeDataFromTBEToCache;
716 transition(IS_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
720 // Transitions from IM_A
722 transition(IM_A, Own_GETX, M) {
723 w_writeDataFromTBEToCache;
728 transition(IM_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
732 // Transitions from SM_A
734 transition(SM_A, Own_GETX, M) {
735 w_writeDataFromTBEToCache;
740 transition(SM_A, {Other_GETS,Other_GET_INSTR}) {
// Another writer beat us: our shared copy is gone, demote to IM_A.
743 transition(SM_A, Other_GETX, IM_A) {
748 // Transitions from MI_A
// Write-back completes when our own PUTX is observed.  Snooped
// requests in the meantime are answered from the TBE; a GETX hands
// ownership away entirely (II_A).
750 transition(MI_A, Own_PUTX, I) {
755 transition(MI_A, {Other_GETS, Other_GET_INSTR}) {
760 transition(MI_A, Other_GETX, II_A) {
765 // Transitions from OI_A
767 transition(OI_A, Own_PUTX, I) {
772 transition(OI_A, {Other_GETS, Other_GET_INSTR}) {
777 transition(OI_A, Other_GETX, II_A) {
783 // Transitions from II_A
// Ownership already surrendered; just wait out our own PUTX.
785 transition(II_A, Own_PUTX, I) {
790 transition(II_A, {Other_GETS, Other_GET_INSTR, Other_GETX}) {
794 // Transitions from IS_D, IS_D_I
// Waiting for data after our GETS was ordered.  A later GETX means our
// copy is invalid the moment it arrives (IS_D -> IS_D_I -> fill to I).
796 transition({IS_D, IS_D_I}, {Other_GETS,Other_GET_INSTR}) {
799 transition(IS_D, Other_GETX, IS_D_I) {
802 transition(IS_D_I, Other_GETX) {
805 transition(IS_D, Data, S) {
807 w_writeDataFromTBEToCache;
813 transition(IS_D_I, Data, I) {
815 w_writeDataFromTBEToCache;
821 // Transitions from IM_D, IM_D_O, IM_D_I, IM_D_OI
// Waiting for data after our GETX was ordered.  Every other request
// observed now is logically AFTER ours, so we record the requestor and
// must forward the data once it arrives; the suffix (_O/_I/_OI) tracks
// the state we will fall to after filling.
823 transition( IM_D, {Other_GETS,Other_GET_INSTR}, IM_D_O ) {
824 e_recordForwardingInfo;
828 transition( IM_D, Other_GETX, IM_D_I ) {
829 e_recordForwardingInfo;
833 transition(IM_D_O, {Other_GETS,Other_GET_INSTR} ) {
834 e_recordForwardingInfo;
838 transition(IM_D_O, Other_GETX, IM_D_OI) {
839 e_recordForwardingInfo;
// Once a GETX has been recorded the forward set is closed; further
// requests need no additional bookkeeping here.
843 transition( {IM_D_I, IM_D_OI}, {Other_GETS, Other_GET_INSTR, Other_GETX} ) {
847 transition(IM_D, Data, M) {
849 w_writeDataFromTBEToCache;
855 transition(IM_D_O, Data, O) {
857 w_writeDataFromTBEToCache;
864 transition(IM_D_I, Data, I) {
866 w_writeDataFromTBEToCache;
873 transition(IM_D_OI, Data, I) {
875 w_writeDataFromTBEToCache;
882 // Transitions for SM_D, SM_D_O
// Upgrade (S -> M) ordered, data pending.  Same forwarding discipline
// as IM_D: record later requestors; a later GETX demotes the fill
// target (SM_D -> IM_D_I, SM_D_O -> IM_D_OI).
884 transition(SM_D, {Other_GETS,Other_GET_INSTR}, SM_D_O) {
885 e_recordForwardingInfo;
889 transition(SM_D, Other_GETX, IM_D_I) {
890 e_recordForwardingInfo;
894 transition(SM_D_O, {Other_GETS,Other_GET_INSTR}) {
895 e_recordForwardingInfo;
899 transition(SM_D_O, Other_GETX, IM_D_OI) {
900 e_recordForwardingInfo;
904 transition(SM_D, Data, M) {
906 w_writeDataFromTBEToCache;
912 transition(SM_D_O, Data, O) {
914 w_writeDataFromTBEToCache;