2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
32 * Authors: Milo Martin
// L1/L2 cache controller for the AMD Hammer-like MOESI protocol.
// One instance models a private L1I/L1D pair plus a private L2 that is
// exclusive with respect to the L1s (a block lives in at most one of them).
machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,            // processor-side interface for load/store callbacks
   CacheMemory * L1IcacheMemory,     // L1 instruction cache
   CacheMemory * L1DcacheMemory,     // L1 data cache
   CacheMemory * L2cacheMemory,      // private L2, exclusive of the L1s
   int cache_response_latency = 10,  // cycles to source a response from this cache
   int issue_latency = 2,            // cycles to issue a request to the network
   int l2_cache_hit_latency = 10,    // latency of an L2-to-L1 block transfer
   bool no_mig_atomic = true         // if true, do not migrate atomically-accessed blocks on Other_GETS

  // NETWORK BUFFERS -- outgoing virtual networks
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  // NETWORK BUFFERS -- incoming virtual networks
  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";
  // Protocol states.  NOTE(review): the stable base states (I, S, O, ...)
  // from the original file are not visible in this extraction.
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    M, desc="Modified (dirty)";
    MM, desc="Modified (dirty and locally modified)";

    // Transient states for outstanding requests
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    ISM, "ISM", desc="Issued GetX, received data, waiting for all acks";
    M_W, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, "MM^W", desc="Issued GetX, received exclusive data";
    IS, "IS", desc="Issued GetS";
    SS, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, "OI", desc="Issued PutO, waiting for ack";
    MI, "MI", desc="Issued PutX, waiting for ack";
    II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";

    // Transient states for in-flight L2-to-L1 block transfers
    IT, "IT", desc="Invalid block transferring to L1";
    ST, "ST", desc="S block transferring to L1";
    OT, "OT", desc="O block transferring to L1";
    MT, "MT", desc="M block transferring to L1";
    MMT, "MMT", desc="MM block transferring to L1";
  // Events that drive the state machine.
  enumeration(Event, desc="Cache events") {
    // Requests from the local processor / local cache hierarchy
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests forwarded from the directory on behalf of other requestors
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    // same as Other_GETS, but the block was atomically accessed and
    // no_mig_atomic suppresses ownership migration
    Other_GETS_No_Mig, desc="A GetS from another processor";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses from other caches / memory
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    // Writeback handshake with the directory
    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Internal triggers generated via the trigger queue
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  // STRUCTURE DEFINITIONS

  // Processor-side request queue, filled by the Sequencer.
  MessageBuffer mandatoryQueue, ordered="false";
125 structure(Entry, desc="...", interface="AbstractCacheEntry") {
126 State CacheState, desc="cache state";
127 bool Dirty, desc="Is the data dirty (different than memory)?";
128 DataBlock DataBlk, desc="data for the block";
129 bool FromL2, default="false", desc="block just moved from L2";
130 bool AtomicAccessed, default="false", desc="block just moved from L2";
  // Transaction Buffer Entry: per-address bookkeeping for an outstanding
  // request or an in-flight (possibly concurrent) writeback.
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    // Timestamps used for request-latency profiling
    Time InitialRequestTime, default="0", desc="time the initial requests was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  // Interface to the TBE table; the implementation is supplied externally.
  external_type(TBETable) {
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);

  // The controller's table of in-flight transactions.
  TBETable TBEs, template_hack="<L1Cache_TBE>";
  // Look up the entry for addr, checking L2 first, then L1D, then L1I.
  // L1/L2 exclusion guarantees at most one of the three holds the block.
  Entry getCacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L2cacheMemory[addr]);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L1DcacheMemory[addr]);
      // final arm of the chain (else not visible in this view):
      // the block must be in the L1I
      return static_cast(Entry, L1IcacheMemory[addr]);
  // Apply an access-permission change to whichever local cache holds addr.
  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
      // final arm (else not visible in this view): the L1I holds the block
      return L1IcacheMemory.changePermission(addr, permission);
  // True if any of the three local caches (L2, L1D, L1I) holds addr's tag.
  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  // Return the current protocol state for addr.  A TBE's transient state
  // takes priority over the cached state.  The asserts enforce pairwise
  // L1I/L1D/L2 exclusion.
  State getState(Address addr) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if(TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (isCacheTagPresent(addr)) {
      return getCacheEntry(addr).CacheState;
  // Record the new state in the TBE and/or cache entry, then derive the
  // access permission from it: writable in MM/MM_W, readable in the
  // shared/clean states, invalid otherwise.  NOTE(review): several of
  // the Read_Only conditions are not visible in this extraction.
  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;

    if (isCacheTagPresent(addr)) {
      getCacheEntry(addr).CacheState := state;

      // Map the protocol state onto an access permission
      if ((state == State:MM) ||
          (state == State:MM_W)) {
        changePermission(addr, AccessPermission:Read_Write);
      } else if (state == State:S ||
                 state == State:M_W ||
                 state == State:ISM ||
        changePermission(addr, AccessPermission:Read_Only);
        changePermission(addr, AccessPermission:Invalid);
  // Map a processor request type to the corresponding Event
  // (LD -> Load, IFETCH -> Ifetch, ST/ATOMIC -> Store).  The return
  // statements of the first arms are not visible in this extraction.
  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
    } else if (type == CacheRequestType:IFETCH) {
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      error("Invalid CacheRequestType");
  // Classify where a remote (non-local) hit was serviced, for profiling.
  // An L1Cache responder implies a cache-to-cache transfer (wCC).
  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      // NOTE direct local hits should not call this
      return GenericMachineType:L1Cache_wCC;
    return ConvertMachToGenericMach(machineIDToMachineType(sender));
  // For hit profiling: report whether a "local" hit was really serviced
  // by the L2 (the block was just moved up), clearing the FromL2 flag
  // so subsequent hits count as true L1 hits.
  GenericMachineType testAndClearLocalHit(Address addr) {
    if (getCacheEntry(addr).FromL2) {
      getCacheEntry(addr).FromL2 := false;
      return GenericMachineType:L2Cache;
    return GenericMachineType:L1Cache;
  // Internal trigger queue; ordered so self-triggers fire in issue order.
  MessageBuffer triggerQueue, ordered="true";

  // OUT PORTS
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
  // IN PORT: self-generated triggers (L2-to-L1 completion and the
  // all-acks notifications posted by o_checkForCompletion).
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address);
          error("Unexpected message");
  // Nothing from the request network

  // IN PORT: requests forwarded by the directory (probes, invalidates,
  // writeback acks/nacks); blocks further messages for the same address.
  in_port(forwardToCache_in, RequestMsg, forwardToCache) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          // With more than one cache, suppress ownership migration for
          // atomically-accessed blocks when no_mig_atomic is set; with a
          // single cache the GETS can only come from a DMA device.
          if (machineCount(MachineType:L1Cache) > 1) {
            if (isCacheTagPresent(in_msg.Address)) {
              if (getCacheEntry(in_msg.Address).AtomicAccessed && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address);
                trigger(Event:Other_GETS, in_msg.Address);
              trigger(Event:Other_GETS, in_msg.Address);
            trigger(Event:NC_DMA_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
          error("Unexpected message");
  // IN PORT: responses (acks and data) from other caches and memory;
  // blocks further messages for the same address.
  in_port(responseToCache_in, ResponseMsg, responseToCache) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address);
          error("Unexpected message");
  // Nothing from the unblock network

  // IN PORT: processor requests.  Enforces L1I/L1D/L2 exclusion: a block
  // found in the "wrong" L1 is first pushed down to the L2 (evicting an
  // L2 victim if needed); an L2 hit is moved up to the proper L1 before
  // the access proceeds.  Several else-arms are not visible in this view.
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
              trigger(Event:L1_to_L2, in_msg.LineAddress);
              // no room in the L2: evict an L2 victim first
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));

          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 does't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress);
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);

              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)));

          // *** DATA ACCESS *** (mirror image of the ifetch path above)

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
              trigger(Event:L1_to_L2, in_msg.LineAddress);
              // no room in the L2: evict an L2 victim first
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));

          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 does't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress);
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);

              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)));
  // ACTIONS

  // Issue a GETS to the directory.  Expect one response per peer cache
  // (n-1) plus one from memory, i.e. machineCount(L1Cache) in total.
  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)

  // Issue a GETX to the directory; ack counting is identical to GETS.
  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)

  // Source exclusive data to a requestor.  On a directed probe this
  // response carries all of the acks itself (else-arm not visible here).
  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        // propagate the profiling timestamps back to the requestor
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;

  // Ask the directory for permission to write the block back.
  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Source (non-exclusive) data to a single requestor; directed probes
  // carry all acks in the response (else-arm not visible here).
  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;

  // Source data but retain a shared copy locally (DATA_SHARED).
  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;

  // Multicast shared data to every requestor merged into one GETS.
  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
  // Ack a probe without data; never valid for directed probes (those
  // must carry data), hence the assert.
  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;

  // Ack a probe while indicating that we keep a shared copy.
  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
  // Unblock the directory after a transaction completes.
  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;

  // Unblock variant telling the directory we hold the block M/O/E.
  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;

  // Unblock variant for shared state; reports the current owner (saved
  // in the TBE) so the directory can keep its owner pointer correct.
  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := TBEs[address].CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
  // Complete a load that hit locally; testAndClearLocalHit attributes
  // the hit to L1 or L2 for profiling.
  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    sequencer.readCallback(address,
                           testAndClearLocalHit(address),
                           getCacheEntry(address).DataBlk);

  // Complete a load that needed external messages; passes the TBE's
  // timing info to the sequencer for miss-latency profiling.
  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             getCacheEntry(address).DataBlk,
                             TBEs[address].InitialRequestTime,
                             TBEs[address].ForwardRequestTime,
                             TBEs[address].FirstResponseTime);

  // Complete a store that hit locally; marks the block dirty and tags
  // atomically-accessed blocks (used by the no_mig_atomic policy).
  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address,
                              testAndClearLocalHit(address),
                              getCacheEntry(address).DataBlk);
      getCacheEntry(address).Dirty := true;
      if (in_msg.Type == CacheRequestType:ATOMIC) {
        getCacheEntry(address).AtomicAccessed := true;

  // Complete a store that needed external messages (data just arrived).
  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              getCacheEntry(address).DataBlk,
                              TBEs[address].InitialRequestTime,
                              TBEs[address].ForwardRequestTime,
                              TBEs[address].FirstResponseTime);
      getCacheEntry(address).Dirty := true;

  // Same as sx, but fired from the trigger queue (all acks collected);
  // attributes the hit to the last responder recorded in the TBE.
  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    sequencer.writeCallback(address,
                            getNondirectHitMachType(address,
                                                    TBEs[address].LastResponder),
                            getCacheEntry(address).DataBlk,
                            TBEs[address].InitialRequestTime,
                            TBEs[address].ForwardRequestTime,
                            TBEs[address].FirstResponseTime);
    getCacheEntry(address).Dirty := true;
  // Allocate a TBE for this address, snapshotting the cached data and
  // dirty bit (needed for writebacks that race with later requests).
  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
    TBEs[address].Dirty := getCacheEntry(address).Dirty;
    TBEs[address].Sharers := false;

  // Queue-pop helpers.
  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();

  action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
    forwardToCache_in.dequeue();
  // Account for an incoming response: subtract its acks from the pending
  // count, remember the responder, and merge its timestamps into the TBE
  // (each timestamp may legitimately arrive on only some responses).
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
      TBEs[address].LastResponder := in_msg.Sender;
      // if both sides carry the timestamp they must agree
      if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
      if (in_msg.InitialRequestTime != zero_time()) {
        TBEs[address].InitialRequestTime := in_msg.InitialRequestTime;
      if (TBEs[address].ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(TBEs[address].ForwardRequestTime == in_msg.ForwardRequestTime);
      if (in_msg.ForwardRequestTime != zero_time()) {
        TBEs[address].ForwardRequestTime := in_msg.ForwardRequestTime;
      // record when the very first response arrived
      if (TBEs[address].FirstResponseTime == zero_time()) {
        TBEs[address].FirstResponseTime := get_time();

  // Remember which machine owns the block (used by gs_sendUnblockS).
  action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      TBEs[address].CurOwner := in_msg.Sender;

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  // Model the L2 hit latency by scheduling an L2_to_L1 trigger.
  action(ll_L2toL1Transfer, "ll", desc="") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;

  // If all expected responses have arrived, post the matching all-acks
  // trigger (sharers seen vs. no sharers).
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (TBEs[address].Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
  // Source data from the TBE (block already left the cache, e.g. during
  // a writeback race) to a single requestor.
  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;

  // TBE-sourced data, multicast to all merged requestors.
  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;

  // Write the TBE's block back to memory; clean writebacks still carry
  // data purely so memory can verify it.
  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Note that at least one other sharer responded (affects which
  // all-acks trigger o_checkForCompletion posts).
  action(r_setSharerBit, "r", desc="We saw other sharers") {
    TBEs[address].Sharers := true;

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);

  // Exclusive writeback to memory; like qq, clean writebacks carry data
  // only so memory can verify it.
  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := TBEs[address].DataBlk;
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Install incoming response data (and its dirty bit) into the cache.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;

  // Same as u, but asserts the incoming data matches what we already
  // hold (sanity check); dirty bit is OR-merged.
  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              getCacheEntry(address).DataBlk, in_msg.DataBlk);
      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty;
865 action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
866 if (L1DcacheMemory.isTagPresent(address)) {
867 L1DcacheMemory.deallocate(address);
869 L1IcacheMemory.deallocate(address);
873 action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
874 if (L1DcacheMemory.isTagPresent(address) == false) {
875 L1DcacheMemory.allocate(address, new Entry);
// Allocate a fresh L1 I-cache entry for this address; idempotent via the
// tag-present guard (mirrors ii_allocateL1DCacheBlock for the D-cache).
879 action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
880 if (L1IcacheMemory.isTagPresent(address) == false) {
881 L1IcacheMemory.allocate(address, new Entry);
// Allocate an L2 entry for this address.  Unlike the two L1 allocate
// actions there is no tag-present guard here, so the triggering
// transition is responsible for not firing this on an already-resident
// L2 block.
885 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
886 L2cacheMemory.allocate(address, new Entry);
// Evict the block from the L2, freeing the way so a replacement can run
// in parallel with the outstanding fetch (counterpart of
// gg_deallocateL1CacheBlock for the L2).
889 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
890 L2cacheMemory.deallocate(address);
// Copy Dirty and DataBlk from the L1 copy (D-cache preferred) into the
// freshly allocated L2 entry; used on the L1_to_L2 migration path.
// NOTE(review): original line 897 is not visible in this excerpt --
// presumably an "} else {" separating the D-cache and I-cache branches;
// confirm in the full file.
893 action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
894 if (L1DcacheMemory.isTagPresent(address)) {
895 static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty;
896 static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk;
898 static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty;
899 static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk;
// Copy Dirty and DataBlk from the L2 entry into whichever L1 was just
// allocated for the migration (D-cache checked first), and mark the L1
// entry FromL2 so later logic can tell this copy originated in the L2.
// NOTE(review): original line 908 is not visible in this excerpt --
// presumably an "} else {" separating the D-cache and I-cache branches;
// confirm in the full file.
903 action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
904 if (L1DcacheMemory.isTagPresent(address)) {
905 static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
906 static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
907 static_cast(Entry, L1DcacheMemory[address]).FromL2 := true;
909 static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
910 static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
911 static_cast(Entry, L1IcacheMemory[address]).FromL2 := true;
// Record the demand miss for statistics.  At most one L1 is charged
// (I-cache takes precedence when both hold the tag), and the L2 is
// charged only when the block is NOT already resident there -- i.e. a
// true L2 miss rather than an L1-only miss.
915 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
916 peek(mandatoryQueue_in, CacheMsg) {
917 if (L1IcacheMemory.isTagPresent(address)) {
918 L1IcacheMemory.profileMiss(in_msg);
919 } else if (L1DcacheMemory.isTagPresent(address)) {
920 L1DcacheMemory.profileMiss(in_msg);
922 if (L2cacheMemory.isTagPresent(address) == false) {
923 L2cacheMemory.profileMiss(in_msg);
// Move the head of the mandatory (processor request) queue to the back
// instead of consuming it -- the standard SLICC way to stall a request
// that cannot be serviced in the current state without deadlocking the
// queue.
928 action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
929 mandatoryQueue_in.recycle();
932 //*****************************************************
934 //*****************************************************
// Stall rules: processor-side events (Load/Ifetch/Store), replacements,
// and L1_to_L2 triggers that arrive while a transaction is already in
// flight for the block are deferred by recycling the mandatory queue
// head; the state is left unchanged.
936 // Transitions for Load/Store/L2_Replacement from transient states
937 transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
938 zz_recycleMandatoryQueue;
941 transition({M_W, MM_W}, {L2_Replacement}) {
942 zz_recycleMandatoryQueue;
945 transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
946 zz_recycleMandatoryQueue;
949 transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
950 zz_recycleMandatoryQueue;
// External/forwarded requests that hit during an in-progress L2-to-L1
// migration (the *T states).
// NOTE(review): the action lines of this transition (orig. 954-955) are
// not visible in this excerpt.
953 transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
// Block migration between the L1s and the L2.  L1_to_L2 is done inline
// (allocate L2, copy, free L1).  L2_to_L1 goes through a transient *T
// state (IT/ST/OT/MT/MMT): allocate the target L1, copy, free the L2,
// and recycle the pending request; Complete_L2_to_L1 then restores the
// stable state.  Several intermediate action lines (e.g. orig. 967, 975-976)
// are not visible in this excerpt.
957 // Transitions moving data between the L1 and L2 caches
958 transition({I, S, O, M, MM}, L1_to_L2) {
959 vv_allocateL2CacheBlock;
960 ss_copyFromL1toL2; // Not really needed for state I
961 gg_deallocateL1CacheBlock;
// L2 -> L1D migration, one transition per stable state.
964 transition(I, Trigger_L2_to_L1D, IT) {
965 ii_allocateL1DCacheBlock;
966 tt_copyFromL2toL1; // Not really needed for state I
968 rr_deallocateL2CacheBlock;
969 zz_recycleMandatoryQueue;
973 transition(S, Trigger_L2_to_L1D, ST) {
974 ii_allocateL1DCacheBlock;
977 rr_deallocateL2CacheBlock;
978 zz_recycleMandatoryQueue;
982 transition(O, Trigger_L2_to_L1D, OT) {
983 ii_allocateL1DCacheBlock;
986 rr_deallocateL2CacheBlock;
987 zz_recycleMandatoryQueue;
991 transition(M, Trigger_L2_to_L1D, MT) {
992 ii_allocateL1DCacheBlock;
995 rr_deallocateL2CacheBlock;
996 zz_recycleMandatoryQueue;
1000 transition(MM, Trigger_L2_to_L1D, MMT) {
1001 ii_allocateL1DCacheBlock;
1004 rr_deallocateL2CacheBlock;
1005 zz_recycleMandatoryQueue;
// L2 -> L1I migration, mirroring the D-cache set above.
1009 transition(I, Trigger_L2_to_L1I, IT) {
1010 jj_allocateL1ICacheBlock;
1011 tt_copyFromL2toL1; // Not really needed for state I
1013 rr_deallocateL2CacheBlock;
1014 zz_recycleMandatoryQueue;
1018 transition(S, Trigger_L2_to_L1I, ST) {
1019 jj_allocateL1ICacheBlock;
1022 rr_deallocateL2CacheBlock;
1023 zz_recycleMandatoryQueue;
1027 transition(O, Trigger_L2_to_L1I, OT) {
1028 jj_allocateL1ICacheBlock;
1031 rr_deallocateL2CacheBlock;
1032 zz_recycleMandatoryQueue;
1036 transition(M, Trigger_L2_to_L1I, MT) {
1037 jj_allocateL1ICacheBlock;
1040 rr_deallocateL2CacheBlock;
1041 zz_recycleMandatoryQueue;
1045 transition(MM, Trigger_L2_to_L1I, MMT) {
1046 jj_allocateL1ICacheBlock;
1049 rr_deallocateL2CacheBlock;
1050 zz_recycleMandatoryQueue;
// Migration complete: each *T state returns to its stable counterpart.
1054 transition(IT, Complete_L2_to_L1, I) {
1058 transition(ST, Complete_L2_to_L1, S) {
1062 transition(OT, Complete_L2_to_L1, O) {
1066 transition(MT, Complete_L2_to_L1, M) {
1070 transition(MMT, Complete_L2_to_L1, MM) {
// State I (Invalid): demand accesses allocate the right L1 and start a
// miss (IS for reads, IM for writes); an L2 replacement just frees the
// (invalid) L2 entry; external requests need no data response.  The
// request-issuing action lines (orig. 1077-1079 etc.) are not visible in
// this excerpt.
1074 // Transitions from Idle
1075 transition(I, Load, IS) {
1076 ii_allocateL1DCacheBlock;
1080 k_popMandatoryQueue;
1083 transition(I, Ifetch, IS) {
1084 jj_allocateL1ICacheBlock;
1088 k_popMandatoryQueue;
1091 transition(I, Store, IM) {
1092 ii_allocateL1DCacheBlock;
1096 k_popMandatoryQueue;
1099 transition(I, L2_Replacement) {
1100 rr_deallocateL2CacheBlock;
1103 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
// State S (Shared): reads hit locally; a Store starts an upgrade (SM);
// Other_GETX/Invalidate drop the copy to I, while other GETS leave the
// shared copy intact.
1108 // Transitions from Shared
1109 transition({S, SM, ISM}, {Load, Ifetch}) {
1111 k_popMandatoryQueue;
1114 transition(S, Store, SM) {
1118 k_popMandatoryQueue;
1121 transition(S, L2_Replacement, I) {
1122 rr_deallocateL2CacheBlock;
1125 transition(S, {Other_GETX, Invalidate}, I) {
1130 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
// State O (Owned): reads hit; a Store starts an upgrade to OM (the owner
// pre-decrements its own ack since it already holds the data); eviction
// goes through OI with a writeback; as owner, this cache supplies data
// for external GETS, including merged (multi-requester) GETS.
1135 // Transitions from Owned
1136 transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
1138 k_popMandatoryQueue;
1141 transition(O, Store, OM) {
1144 p_decrementNumberOfMessagesByOne;
1146 k_popMandatoryQueue;
1149 transition(O, L2_Replacement, OI) {
1152 rr_deallocateL2CacheBlock;
1155 transition(O, {Other_GETX, Invalidate}, I) {
1160 transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1165 transition(O, Merged_GETS) {
1166 em_sendDataSharedMultiple;
// State MM (Modified, locally written): loads and stores hit.  Eviction
// goes through MI with a dirty writeback.  On Other_GETS the line
// migrates -- exclusive data is handed off and we drop to I -- whereas
// Other_GETS_No_Mig keeps us as Owner (O).  This is the "migratory
// sharing" optimization distinguishing MM from M.
1170 // Transitions from Modified
1171 transition(MM, {Load, Ifetch}) {
1173 k_popMandatoryQueue;
1176 transition(MM, Store) {
1178 k_popMandatoryQueue;
1181 transition(MM, L2_Replacement, MI) {
1184 rr_deallocateL2CacheBlock;
1187 transition(MM, {Other_GETX, Invalidate}, I) {
1188 c_sendExclusiveData;
1192 transition(MM, Other_GETS, I) {
1193 c_sendExclusiveData;
1197 transition(MM, NC_DMA_GETS) {
1198 c_sendExclusiveData;
1202 transition(MM, Other_GETS_No_Mig, O) {
1207 transition(MM, Merged_GETS, O) {
1208 em_sendDataSharedMultiple;
// State M (Dirty Exclusive, not locally written since acquisition):
// reads hit; the first Store promotes to MM.  Unlike MM, ANY external
// GETS (migratory or not) demotes us only to Owner -- the line does not
// migrate away on a read.
1212 // Transitions from Dirty Exclusive
1213 transition(M, {Load, Ifetch}) {
1215 k_popMandatoryQueue;
1218 transition(M, Store, MM) {
1220 k_popMandatoryQueue;
1223 transition(M, L2_Replacement, MI) {
1226 rr_deallocateL2CacheBlock;
1229 transition(M, {Other_GETX, Invalidate}, I) {
1230 c_sendExclusiveData;
1234 transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
1239 transition(M, NC_DMA_GETS) {
1244 transition(M, Merged_GETS, O) {
1245 em_sendDataSharedMultiple;
// State IM (Invalid, Store miss outstanding): count acks as they drain;
// plain Data advances to ISM (still waiting for acks), while
// Exclusive_Data means no sharers existed, so the store completes
// immediately and we enter the MM_W window.
1249 // Transitions from IM
1251 transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1256 transition(IM, Ack) {
1257 m_decrementNumberOfMessages;
1258 o_checkForCompletion;
1262 transition(IM, Data, ISM) {
1264 m_decrementNumberOfMessages;
1265 o_checkForCompletion;
1269 transition(IM, Exclusive_Data, MM_W) {
1271 m_decrementNumberOfMessages;
1272 o_checkForCompletion;
1273 sx_external_store_hit;
// State SM (Shared, upgrade outstanding): an intervening Other_GETX or
// Invalidate kills our shared copy, demoting the upgrade to a full IM
// miss.  Data arriving here must match the copy we already hold, hence
// the verifying write (v_writeDataToCacheVerify).
1277 // Transitions from SM
1278 transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1283 transition(SM, {Other_GETX, Invalidate}, IM) {
1288 transition(SM, Ack) {
1289 m_decrementNumberOfMessages;
1290 o_checkForCompletion;
1294 transition(SM, Data, ISM) {
1295 v_writeDataToCacheVerify;
1296 m_decrementNumberOfMessages;
1297 o_checkForCompletion;
// State ISM (data received, acks still outstanding for a store miss):
// drain acks; once all acks arrive with no sharers, complete the store
// and settle in MM.
1301 // Transitions from ISM
1302 transition(ISM, Ack) {
1303 m_decrementNumberOfMessages;
1304 o_checkForCompletion;
1308 transition(ISM, All_acks_no_sharers, MM) {
1309 sxt_trig_ext_store_hit;
// State OM (Owned, upgrade outstanding): losing ownership to an
// Other_GETX demotes the upgrade to IM (and bumps the expected message
// count); as still-owner we keep servicing GETS.  All acks in (with or
// without sharers) completes the store into MM.
1315 // Transitions from OM
1317 transition(OM, {Other_GETX, Invalidate}, IM) {
1319 pp_incrementNumberOfMessagesByOne;
1323 transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1328 transition(OM, Merged_GETS) {
1329 em_sendDataSharedMultiple;
1333 transition(OM, Ack) {
1334 m_decrementNumberOfMessages;
1335 o_checkForCompletion;
1339 transition(OM, {All_acks, All_acks_no_sharers}, MM) {
1340 sxt_trig_ext_store_hit;
// State IS (Invalid, Load/Ifetch miss outstanding): drain Ack /
// Shared_Ack responses; Data or Shared_Data completes the load into SS
// (still collecting acks) and records the responding owner, while
// Exclusive_Data means we got the only copy and enter the M_W window.
1346 // Transitions from IS
1348 transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1353 transition(IS, Ack) {
1354 m_decrementNumberOfMessages;
1355 o_checkForCompletion;
1359 transition(IS, Shared_Ack) {
1360 m_decrementNumberOfMessages;
1362 o_checkForCompletion;
1366 transition(IS, Data, SS) {
1368 m_decrementNumberOfMessages;
1369 o_checkForCompletion;
1370 hx_external_load_hit;
1371 uo_updateCurrentOwner;
1375 transition(IS, Exclusive_Data, M_W) {
1377 m_decrementNumberOfMessages;
1378 o_checkForCompletion;
1379 hx_external_load_hit;
1383 transition(IS, Shared_Data, SS) {
1386 m_decrementNumberOfMessages;
1387 o_checkForCompletion;
1388 hx_external_load_hit;
1389 uo_updateCurrentOwner;
// State SS (load data received, acks still outstanding): drain remaining
// acks; both completion events settle in S -- even with no sharers the
// directory may still be the owner, so S (not E/M) is the safe stable
// state.
1393 // Transitions from SS
1395 transition(SS, Ack) {
1396 m_decrementNumberOfMessages;
1397 o_checkForCompletion;
1401 transition(SS, Shared_Ack) {
1402 m_decrementNumberOfMessages;
1404 o_checkForCompletion;
1408 transition(SS, All_acks, S) {
1414 transition(SS, All_acks_no_sharers, S) {
1415 // Note: The directory might still be the owner, so that is why we go to S
// State MM_W (MM, waiting out the ack window after an exclusive store
// fill): stores still hit locally; remaining acks drain; all-acks
// settles the block in MM.
1421 // Transitions from MM_W
1423 transition(MM_W, Store) {
1425 k_popMandatoryQueue;
1428 transition(MM_W, Ack) {
1429 m_decrementNumberOfMessages;
1430 o_checkForCompletion;
1434 transition(MM_W, All_acks_no_sharers, MM) {
// State M_W (M, waiting out the ack window after an exclusive load
// fill): a Store during the window promotes to MM_W; remaining acks
// drain; all-acks settles the block in M.
1440 // Transitions from M_W
1442 transition(M_W, Store, MM_W) {
1444 k_popMandatoryQueue;
1447 transition(M_W, Ack) {
1448 m_decrementNumberOfMessages;
1449 o_checkForCompletion;
1453 transition(M_W, All_acks_no_sharers, M) {
// States OI/MI (writeback in flight, data lives in the TBE): forwarded
// requests are answered from the TBE; an Other_GETX strips ownership so
// we fall to II.  On Writeback_Ack, MI writes back exclusively while OI
// uses the owned-data writeback path; both end Invalid.
1459 // Transitions from OI/MI
1461 transition({OI, MI}, {Other_GETX, Invalidate}, II) {
1462 q_sendDataFromTBEToCache;
1466 transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
1467 q_sendDataFromTBEToCache;
1471 transition({OI, MI}, Merged_GETS, OI) {
1472 qm_sendDataFromTBEToCache;
1476 transition(MI, Writeback_Ack, I) {
1477 t_sendExclusiveDataFromTBEToMemory;
1482 transition(OI, Writeback_Ack, I) {
1483 qq_sendDataFromTBEToMemory;
// State II (writeback in flight but ownership already lost): forwarded
// requests no longer need data from us; a Writeback_Ack simply finishes
// the transaction in I.  Action lines for these transitions (orig.
// 1490-1492, 1495 ff.) are not visible in this excerpt.
1488 // Transitions from II
1489 transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
1494 transition(II, Writeback_Ack, I) {
1500 transition(II, Writeback_Nack, I) {