2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
32 * Authors: Milo Martin
// L1Cache controller of the AMD Hammer-like (MOESI_hammer) broadcast protocol.
// This machine models a private L1I/L1D pair plus a private L2 (the three
// CacheMemory parameters below); the helper functions later in the file probe
// all three to locate a block.
36 machine(L1Cache, "AMD Hammer-like protocol")
// Configuration parameters (latencies in cycles; defaults shown).
37 : Sequencer * sequencer,
38 CacheMemory * L1IcacheMemory,
39 CacheMemory * L1DcacheMemory,
40 CacheMemory * L2cacheMemory,
41 int cache_response_latency = 10,
42 int issue_latency = 2,
43 int l2_cache_hit_latency = 10,
// When true, blocks touched by an atomic op answer Other_GETS with
// Other_GETS_No_Mig to suppress migratory-sharing optimization (see
// forwardToCache_in below).
44 bool no_mig_atomic = true
// Outbound virtual networks: requests (vnet 2), responses (vnet 4),
// unblocks (vnet 5).
48 MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
49 MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
50 MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";
// Inbound virtual networks: forwarded requests (vnet 3), responses (vnet 4).
52 MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
53 MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";
// Cache-line states. Base MOESI states plus transient states for
// outstanding GETS/GETX, writebacks, and L2-to-L1 transfers.
// NOTE(review): this excerpt appears to be missing some stable-state lines
// (internal numbering jumps 57->62); the I/S/O states are presumably declared
// in the dropped lines — confirm against the full file.
57 enumeration(State, desc="Cache states", default="L1Cache_State_I") {
62 M, desc="Modified (dirty)";
63 MM, desc="Modified (dirty and locally modified)";
// Transient states while a GETX is outstanding.
66 IM, "IM", desc="Issued GetX";
67 SM, "SM", desc="Issued GetX, we still have an old copy of the line";
68 OM, "OM", desc="Issued GetX, received data";
69 ISM, "ISM", desc="Issued GetX, received data, waiting for all acks";
// "_W" states: exclusive data received, still within the store-window.
70 M_W, "M^W", desc="Issued GetS, received exclusive data";
71 MM_W, "MM^W", desc="Issued GetX, received exclusive data";
// Transient states while a GETS is outstanding.
72 IS, "IS", desc="Issued GetS";
73 SS, "SS", desc="Issued GetS, received data, waiting for all acks";
// Writeback transient states.
74 OI, "OI", desc="Issued PutO, waiting for ack";
75 MI, "MI", desc="Issued PutX, waiting for ack";
76 II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
// "T" states: block in flight from the private L2 to one of the L1s.
77 IT, "IT", desc="Invalid block transferring to L1";
78 ST, "ST", desc="S block transferring to L1";
79 OT, "OT", desc="O block transferring to L1";
80 MT, "MT", desc="M block transferring to L1";
81 MMT, "MMT", desc="MM block transferring to L1";
// Events that drive the transition table: processor requests, internal
// L1<->L2 movement triggers, forwarded remote requests, responses, and
// completion triggers.
85 enumeration(Event, desc="Cache events") {
// Demand requests from the attached processor (via the mandatory queue).
86 Load, desc="Load request from the processor";
87 Ifetch, desc="I-fetch request from the processor";
88 Store, desc="Store request from the processor";
// Internal cache-hierarchy movement (L1/L2 are exclusive of each other).
89 L2_Replacement, desc="L2 Replacement";
90 L1_to_L2, desc="L1 to L2 transfer";
91 Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
92 Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
93 Complete_L2_to_L1, desc="L2 to L1 transfer completed";
// Requests forwarded from other processors / the directory.
96 Other_GETX, desc="A GetX from another processor";
97 Other_GETS, desc="A GetS from another processor";
98 Merged_GETS, desc="A Merged GetS from another processor";
// Same as Other_GETS but the block was atomically accessed and
// no_mig_atomic is set, so ownership should not migrate.
99 Other_GETS_No_Mig, desc="A GetS from another processor";
100 Invalidate, desc="Invalidate block";
// Responses to our own outstanding requests.
103 Ack, desc="Received an ack message";
104 Shared_Ack, desc="Received an ack message, responder has a shared copy";
105 Data, desc="Received a data message";
106 Shared_Data, desc="Received a data message, responder has a shared copy";
107 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
// Directory responses to writebacks.
109 Writeback_Ack, desc="Writeback O.K. from directory";
110 Writeback_Nack, desc="Writeback not O.K. from directory";
// Internal triggers fired once NumPendingMsgs reaches zero.
113 All_acks, desc="Received all required data and message acks";
114 All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
119 // STRUCTURE DEFINITIONS
// Queue of demand requests from the Sequencer/CPU.
121 MessageBuffer mandatoryQueue, ordered="false";
// Per-block cache entry; shared by the L1I, L1D and L2 CacheMemories.
124 structure(Entry, desc="...", interface="AbstractCacheEntry") {
125 State CacheState, desc="cache state";
126 bool Dirty, desc="Is the data dirty (different than memory)?";
127 DataBlock DataBlk, desc="data for the block";
// Set by tt_copyFromL2toL1 so the first hit is attributed to the L2
// (see testAndClearLocalHit).
128 bool FromL2, default="false", desc="block just moved from L2";
// Set on ATOMIC stores; with no_mig_atomic it suppresses migratory
// sharing on a later Other_GETS.
// NOTE(review): the desc string duplicates FromL2's; likely copy-paste
// in the original source — left untouched here.
129 bool AtomicAccessed, default="false", desc="block just moved from L2";
// Transaction Buffer Entry: per-address bookkeeping for an outstanding
// miss or writeback.
133 structure(TBE, desc="...") {
134 State TBEState, desc="Transient state";
135 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
136 bool Dirty, desc="Is the data dirty (different than memory)?";
137 int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
138 bool Sharers, desc="On a GetS, did we find any other sharers in the system";
139 MachineID LastResponder, desc="last machine to send a response for this request";
140 MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
// Timestamps used for latency profiling in the sequencer callbacks.
141 Time InitialRequestTime, default="0", desc="time the initial requests was sent from the L1Cache";
142 Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
143 Time FirstResponseTime, default="0", desc="the time the first response was received";
// Interface to the C++-implemented TBE table, keyed by block address.
146 external_type(TBETable) {
148 void allocate(Address);
149 void deallocate(Address);
150 bool isPresent(Address);
// The one TBETable instance for this controller.
153 TBETable TBEs, template_hack="<L1Cache_TBE>";
// Locate the cache entry for addr, probing L2, then L1D, then L1I.
// L1/L2 are exclusive, so at most one holds the tag (asserted in
// getState/setState); the final branch assumes the L1I holds it.
155 Entry getCacheEntry(Address addr), return_by_ref="yes" {
156 if (L2cacheMemory.isTagPresent(addr)) {
157 return static_cast(Entry, L2cacheMemory[addr]);
158 } else if (L1DcacheMemory.isTagPresent(addr)) {
159 return static_cast(Entry, L1DcacheMemory[addr]);
// NOTE(review): an "} else {" line (original line 160) appears to be
// missing from this excerpt — the L1I return below is the else branch.
161 return static_cast(Entry, L1IcacheMemory[addr]);
// Apply the access permission to whichever cache currently holds addr,
// using the same L2 -> L1D -> L1I probe order as getCacheEntry.
165 void changePermission(Address addr, AccessPermission permission) {
166 if (L2cacheMemory.isTagPresent(addr)) {
167 return L2cacheMemory.changePermission(addr, permission);
168 } else if (L1DcacheMemory.isTagPresent(addr)) {
169 return L1DcacheMemory.changePermission(addr, permission);
// NOTE(review): an "} else {" line (original line 170) appears to be
// missing from this excerpt — the L1I call below is the else branch.
171 return L1IcacheMemory.changePermission(addr, permission);
// True iff any of the three private caches currently holds addr's tag.
175 bool isCacheTagPresent(Address addr) {
176 return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
// Return the protocol state for addr: the TBE's transient state takes
// precedence over the cache entry's stable state. The asserts enforce
// mutual exclusion between L1I, L1D and L2.
// NOTE(review): the fall-through "return State:I" for the
// not-present case (and closing braces) is not visible in this excerpt.
179 State getState(Address addr) {
180 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
181 assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
182 assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
184 if(TBEs.isPresent(addr)) {
185 return TBEs[addr].TBEState;
186 } else if (isCacheTagPresent(addr)) {
187 return getCacheEntry(addr).CacheState;
// Record the new state in the TBE (if any) and the cache entry (if any),
// then derive the access permission: MM/MM_W => Read_Write, the listed
// readable states => Read_Only, everything else => Invalid.
192 void setState(Address addr, State state) {
193 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
194 assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
195 assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
197 if (TBEs.isPresent(addr)) {
198 TBEs[addr].TBEState := state;
201 if (isCacheTagPresent(addr)) {
202 getCacheEntry(addr).CacheState := state;
// Modified-with-write-permission states.
205 if ((state == State:MM) ||
206 (state == State:MM_W)) {
207 changePermission(addr, AccessPermission:Read_Write);
// NOTE(review): several alternatives of this Read_Only condition
// (original lines 209-215, presumably O, SM, M, etc.) are missing from
// this excerpt — only a fragment of the disjunction is visible.
208 } else if (state == State:S ||
211 state == State:M_W ||
213 state == State:ISM ||
216 changePermission(addr, AccessPermission:Read_Only);
// All remaining (transient/invalid) states are not readable.
218 changePermission(addr, AccessPermission:Invalid);
// Map a processor request type to the corresponding protocol event
// (LD -> Load, IFETCH -> Ifetch, ST/ATOMIC -> Store).
// NOTE(review): the "return Event:..." lines (original 225/227/229) are
// missing from this excerpt; only the conditions and the error() remain.
223 Event mandatory_request_type_to_event(CacheRequestType type) {
224 if (type == CacheRequestType:LD) {
226 } else if (type == CacheRequestType:IFETCH) {
228 } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
231 error("Invalid CacheRequestType");
// Classify who serviced a miss, for profiling: a response from another
// L1Cache is a cache-to-cache transfer (L1Cache_wCC); otherwise convert
// the sender's machine type directly.
235 GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
236 if (machineIDToMachineType(sender) == MachineType:L1Cache) {
238 // NOTE direct local hits should not call this
240 return GenericMachineType:L1Cache_wCC;
// NOTE(review): an "} else {" (original line 241) appears to be missing
// from this excerpt — the conversion below is the else branch.
242 return ConvertMachToGenericMach(machineIDToMachineType(sender));
// For hit profiling: if the block just migrated from the private L2
// (FromL2 set by tt_copyFromL2toL1), report the hit as an L2 hit once
// and clear the flag; otherwise it is a plain L1 hit.
246 GenericMachineType testAndClearLocalHit(Address addr) {
247 if (getCacheEntry(addr).FromL2) {
248 getCacheEntry(addr).FromL2 := false;
249 return GenericMachineType:L2Cache;
// NOTE(review): an "} else {" (original line 250) appears to be missing
// from this excerpt — the L1 return below is the else branch.
251 return GenericMachineType:L1Cache;
// Internal self-trigger queue (ordered) used for ALL_ACKS / L2_to_L1
// completion events, plus the out_ports bound to the network buffers
// declared at the top of the machine.
255 MessageBuffer triggerQueue, ordered="true";
259 out_port(requestNetwork_out, RequestMsg, requestFromCache);
260 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
261 out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
262 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
// Highest-priority in_port: consumes self-generated triggers and fires
// the corresponding completion events.
267 in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
268 if (triggerQueue_in.isReady()) {
269 peek(triggerQueue_in, TriggerMsg) {
270 if (in_msg.Type == TriggerType:L2_to_L1) {
271 trigger(Event:Complete_L2_to_L1, in_msg.Address);
272 } else if (in_msg.Type == TriggerType:ALL_ACKS) {
273 trigger(Event:All_acks, in_msg.Address);
274 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
275 trigger(Event:All_acks_no_sharers, in_msg.Address);
// NOTE(review): an "} else {" (original line 276) appears to be missing
// from this excerpt — error() is the fall-through for unknown types.
277 error("Unexpected message");
283 // Nothing from the request network
// Forwarded-request network: GETX/GETS/MERGED_GETS probes from other
// processors (relayed by the directory), invalidations, and writeback
// acks/nacks. block_on="Address" serializes handling per block.
286 in_port(forwardToCache_in, RequestMsg, forwardToCache) {
287 if (forwardToCache_in.isReady()) {
288 peek(forwardToCache_in, RequestMsg, block_on="Address") {
289 if (in_msg.Type == CoherenceRequestType:GETX) {
290 trigger(Event:Other_GETX, in_msg.Address);
291 } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
292 trigger(Event:Merged_GETS, in_msg.Address);
293 } else if (in_msg.Type == CoherenceRequestType:GETS) {
// A GETS to a block we hold that was atomically accessed is demoted to
// the non-migratory variant when no_mig_atomic is configured.
294 if (isCacheTagPresent(in_msg.Address)) {
295 if (getCacheEntry(in_msg.Address).AtomicAccessed && no_mig_atomic) {
296 trigger(Event:Other_GETS_No_Mig, in_msg.Address);
// NOTE(review): "} else {" lines (original 297/300) are missing from
// this excerpt; the two Other_GETS triggers below are the else arms of
// the AtomicAccessed test and of the tag-present test respectively.
298 trigger(Event:Other_GETS, in_msg.Address);
301 trigger(Event:Other_GETS, in_msg.Address);
303 } else if (in_msg.Type == CoherenceRequestType:INV) {
304 trigger(Event:Invalidate, in_msg.Address);
305 } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
306 trigger(Event:Writeback_Ack, in_msg.Address);
307 } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
308 trigger(Event:Writeback_Nack, in_msg.Address);
// Fall-through for unknown request types.
310 error("Unexpected message");
// Response network: acks and data for our own outstanding requests.
// The _SHARED variants indicate the responder keeps a shared copy,
// which determines whether we end up exclusive.
317 in_port(responseToCache_in, ResponseMsg, responseToCache) {
318 if (responseToCache_in.isReady()) {
319 peek(responseToCache_in, ResponseMsg, block_on="Address") {
320 if (in_msg.Type == CoherenceResponseType:ACK) {
321 trigger(Event:Ack, in_msg.Address);
322 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
323 trigger(Event:Shared_Ack, in_msg.Address);
324 } else if (in_msg.Type == CoherenceResponseType:DATA) {
325 trigger(Event:Data, in_msg.Address);
326 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
327 trigger(Event:Shared_Data, in_msg.Address);
328 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
329 trigger(Event:Exclusive_Data, in_msg.Address);
// NOTE(review): an "} else {" (original line 330) appears to be missing
// from this excerpt — error() is the fall-through for unknown types.
331 error("Unexpected message");
337 // Nothing from the unblock network
// Lowest-priority in_port: demand requests from the processor. The
// IFETCH and data-access arms are mirror images of each other:
//   1. If the block sits in the WRONG L1, first move it to the L2
//      (L1_to_L2), evicting an L2 victim if the L2 set is full.
//   2. If the RIGHT L1 has the tag, fire Load/Ifetch/Store directly
//      (L1/L2 exclusion guarantees the L2 does not also have it).
//   3. Otherwise, if the right L1 has a free way: pull from L2 if the
//      L2 has it (Trigger_L2_to_L1I/D), else fetch from the network.
//   4. Otherwise make room: move the L1 victim to the L2, evicting an
//      L2 victim first if needed.
// NOTE(review): several "} else {" / closing-brace lines are missing
// from this excerpt (internal numbering gaps, e.g. 353->355, 361->363);
// each trigger below pairs with the comment preceding it.
340 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
341 if (mandatoryQueue_in.isReady()) {
342 peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
344 // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
346 if (in_msg.Type == CacheRequestType:IFETCH) {
347 // ** INSTRUCTION ACCESS ***
349 // Check to see if it is in the OTHER L1
350 if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
351 // The block is in the wrong L1, try to write it to the L2
352 if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
353 trigger(Event:L1_to_L2, in_msg.LineAddress);
// else: L2 set full — evict the L2 victim for this address first.
355 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
359 if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
360 // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
361 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
363 if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
364 // L1 does't have the line, but we have space for it in the L1
365 if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
366 // L2 has it (maybe not with the right permissions)
367 trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress);
369 // We have room, the L2 doesn't have it, so the L1 fetches the line
370 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
373 // No room in the L1, so we need to make room
374 if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
375 // The L2 has room, so we move the line from the L1 to the L2
376 trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
378 // The L2 does not have room, so we replace a line from the L2
379 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)));
384 // *** DATA ACCESS ***
386 // Check to see if it is in the OTHER L1
387 if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
388 // The block is in the wrong L1, try to write it to the L2
389 if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
390 trigger(Event:L1_to_L2, in_msg.LineAddress);
// else: L2 set full — evict the L2 victim for this address first.
392 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
396 if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
397 // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
398 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
400 if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
401 // L1 does't have the line, but we have space for it in the L1
402 if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
403 // L2 has it (maybe not with the right permissions)
404 trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress);
406 // We have room, the L2 doesn't have it, so the L1 fetches the line
407 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
410 // No room in the L1, so we need to make room
411 if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
412 // The L2 has room, so we move the line from the L1 to the L2
413 trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
415 // The L2 does not have room, so we replace a line from the L2
416 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)));
// Issue a GETS to the directory. In this broadcast protocol we expect
// one response per other cache plus one from memory, hence the pending
// count is initialized to machineCount(L1Cache).
427 action(a_issueGETS, "a", desc="Issue GETS") {
428 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
429 out_msg.Address := address;
430 out_msg.Type := CoherenceRequestType:GETS;
431 out_msg.Requestor := machineID;
432 out_msg.Destination.add(map_Address_to_Directory(address));
433 out_msg.MessageSize := MessageSizeType:Request_Control;
434 out_msg.InitialRequestTime := get_time();
435 TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
// Issue a GETX to the directory; same pending-message accounting as GETS.
439 action(b_issueGETX, "b", desc="Issue GETX") {
440 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
441 out_msg.Address := address;
442 out_msg.Type := CoherenceRequestType:GETX;
443 out_msg.Requestor := machineID;
444 out_msg.Destination.add(map_Address_to_Directory(address));
445 out_msg.MessageSize := MessageSizeType:Request_Control;
446 out_msg.InitialRequestTime := get_time();
447 TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
// Respond to a forwarded request with exclusive data from the cache.
// On a directed probe the single response must also carry the full ack
// count the requestor is waiting for.
451 action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
452 peek(forwardToCache_in, RequestMsg) {
453 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
454 out_msg.Address := address;
455 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
456 out_msg.Sender := machineID;
457 out_msg.Destination.add(in_msg.Requestor);
458 out_msg.DataBlk := getCacheEntry(address).DataBlk;
459 out_msg.Dirty := getCacheEntry(address).Dirty;
460 if (in_msg.DirectedProbe) {
461 out_msg.Acks := machineCount(MachineType:L1Cache);
// NOTE(review): the else branch (original lines 462-464, presumably
// "out_msg.Acks := 1;") is missing from this excerpt.
465 out_msg.MessageSize := MessageSizeType:Response_Data;
466 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
467 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Request a writeback: send PUT to the directory and wait for
// Writeback_Ack/Nack on the forward network.
472 action(d_issuePUT, "d", desc="Issue PUT") {
473 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
474 out_msg.Address := address;
475 out_msg.Type := CoherenceRequestType:PUT;
476 out_msg.Requestor := machineID;
477 out_msg.Destination.add(map_Address_to_Directory(address));
478 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Respond to a forwarded request with data, giving up our copy.
482 action(e_sendData, "e", desc="Send data from cache to requestor") {
483 peek(forwardToCache_in, RequestMsg) {
484 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
485 out_msg.Address := address;
486 out_msg.Type := CoherenceResponseType:DATA;
487 out_msg.Sender := machineID;
488 out_msg.Destination.add(in_msg.Requestor);
489 out_msg.DataBlk := getCacheEntry(address).DataBlk;
490 out_msg.Dirty := getCacheEntry(address).Dirty;
491 if (in_msg.DirectedProbe) {
492 out_msg.Acks := machineCount(MachineType:L1Cache);
// NOTE(review): the else branch (original lines 493-495) is missing
// from this excerpt.
496 out_msg.MessageSize := MessageSizeType:Response_Data;
497 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
498 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Same as e_sendData but the responder retains a shared copy
// (DATA_SHARED), used when servicing an Other_GETS while staying in O/S.
503 action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
504 peek(forwardToCache_in, RequestMsg) {
505 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
506 out_msg.Address := address;
507 out_msg.Type := CoherenceResponseType:DATA_SHARED;
508 out_msg.Sender := machineID;
509 out_msg.Destination.add(in_msg.Requestor);
510 out_msg.DataBlk := getCacheEntry(address).DataBlk;
511 DEBUG_EXPR(out_msg.DataBlk);
512 out_msg.Dirty := getCacheEntry(address).Dirty;
513 if (in_msg.DirectedProbe) {
514 out_msg.Acks := machineCount(MachineType:L1Cache);
// NOTE(review): the else branch (original lines 515-517) is missing
// from this excerpt.
518 out_msg.MessageSize := MessageSizeType:Response_Data;
519 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
520 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Multicast variant for MERGED_GETS: one DATA_SHARED message addressed
// to every merged requestor; the ack count always covers all caches.
525 action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
526 peek(forwardToCache_in, RequestMsg) {
527 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
528 out_msg.Address := address;
529 out_msg.Type := CoherenceResponseType:DATA_SHARED;
530 out_msg.Sender := machineID;
531 out_msg.Destination := in_msg.MergedRequestors;
532 out_msg.DataBlk := getCacheEntry(address).DataBlk;
533 DEBUG_EXPR(out_msg.DataBlk);
534 out_msg.Dirty := getCacheEntry(address).Dirty;
535 out_msg.Acks := machineCount(MachineType:L1Cache);
536 out_msg.MessageSize := MessageSizeType:Response_Data;
537 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
538 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Respond to a forwarded probe with a data-less ACK (we hold no copy
// worth sending). Directed probes must never reach this path — the
// owner is expected to supply data — hence the assert.
543 action(f_sendAck, "f", desc="Send ack from cache to requestor") {
544 peek(forwardToCache_in, RequestMsg) {
545 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
546 out_msg.Address := address;
547 out_msg.Type := CoherenceResponseType:ACK;
548 out_msg.Sender := machineID;
549 out_msg.Destination.add(in_msg.Requestor);
// NOTE(review): an "out_msg.Acks := 1;" line (original 550) appears to
// be missing from this excerpt.
551 assert(in_msg.DirectedProbe == false);
552 out_msg.MessageSize := MessageSizeType:Response_Control;
553 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
554 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Same as f_sendAck but signals that we retain a shared copy
// (ACK_SHARED), so the requestor will not end up exclusive.
559 action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
560 peek(forwardToCache_in, RequestMsg) {
561 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
562 out_msg.Address := address;
563 out_msg.Type := CoherenceResponseType:ACK_SHARED;
564 out_msg.Sender := machineID;
565 out_msg.Destination.add(in_msg.Requestor);
// NOTE(review): an "out_msg.Acks := 1;" line (original 566) appears to
// be missing from this excerpt.
567 assert(in_msg.DirectedProbe == false);
568 out_msg.MessageSize := MessageSizeType:Response_Control;
569 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
570 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Unblock the directory after a transaction completes, without stating
// a final ownership state.
575 action(g_sendUnblock, "g", desc="Send unblock to memory") {
576 enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
577 out_msg.Address := address;
578 out_msg.Type := CoherenceResponseType:UNBLOCK;
579 out_msg.Sender := machineID;
580 out_msg.Destination.add(map_Address_to_Directory(address));
581 out_msg.MessageSize := MessageSizeType:Unblock_Control;
// Unblock variant telling the directory we ended in an owned/exclusive
// state (M/O/E).
585 action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
586 enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
587 out_msg.Address := address;
588 out_msg.Type := CoherenceResponseType:UNBLOCKM;
589 out_msg.Sender := machineID;
590 out_msg.Destination.add(map_Address_to_Directory(address));
591 out_msg.MessageSize := MessageSizeType:Unblock_Control;
// Unblock variant telling the directory we ended Shared; CurOwner
// (recorded by uo_updateCurrentOwner) tells it who still owns the block.
595 action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
596 enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
597 out_msg.Address := address;
598 out_msg.Type := CoherenceResponseType:UNBLOCKS;
599 out_msg.Sender := machineID;
600 out_msg.CurOwner := TBEs[address].CurOwner;
601 out_msg.Destination.add(map_Address_to_Directory(address));
602 out_msg.MessageSize := MessageSizeType:Unblock_Control;
// Local load hit: hand the data to the sequencer; testAndClearLocalHit
// attributes the hit to L1 or (once, after a transfer) to the L2.
606 action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
607 DEBUG_EXPR(getCacheEntry(address).DataBlk);
609 sequencer.readCallback(address,
610 testAndClearLocalHit(address),
611 getCacheEntry(address).DataBlk);
// Load completion that needed external messages: report who serviced
// the miss plus the TBE timestamps for latency profiling.
615 action(hx_external_load_hit, "hx", desc="load required external msgs") {
616 DEBUG_EXPR(getCacheEntry(address).DataBlk);
617 peek(responseToCache_in, ResponseMsg) {
619 sequencer.readCallback(address,
620 getNondirectHitMachType(in_msg.Address, in_msg.Sender),
621 getCacheEntry(address).DataBlk,
622 TBEs[address].InitialRequestTime,
623 TBEs[address].ForwardRequestTime,
624 TBEs[address].FirstResponseTime);
// Local store hit: notify the sequencer, mark the line dirty, and
// remember atomic accesses for the no_mig_atomic policy.
629 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
630 DEBUG_EXPR(getCacheEntry(address).DataBlk);
631 peek(mandatoryQueue_in, CacheMsg) {
632 sequencer.writeCallback(address,
633 testAndClearLocalHit(address),
634 getCacheEntry(address).DataBlk);
636 getCacheEntry(address).Dirty := true;
637 if (in_msg.Type == CacheRequestType:ATOMIC) {
638 getCacheEntry(address).AtomicAccessed := true;
// Store completion that needed external messages: the servicing machine
// is derived from the sender of the response currently being peeked.
643 action(sx_external_store_hit, "sx", desc="store required external msgs.") {
644 DEBUG_EXPR(getCacheEntry(address).DataBlk);
645 peek(responseToCache_in, ResponseMsg) {
647 sequencer.writeCallback(address,
648 getNondirectHitMachType(address, in_msg.Sender),
649 getCacheEntry(address).DataBlk,
650 TBEs[address].InitialRequestTime,
651 TBEs[address].ForwardRequestTime,
652 TBEs[address].FirstResponseTime);
655 getCacheEntry(address).Dirty := true;
// Trigger-driven variant (fired from All_acks): no response message is
// available to peek, so the last responder recorded in the TBE is used.
658 action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
659 DEBUG_EXPR(getCacheEntry(address).DataBlk);
661 sequencer.writeCallback(address,
662 getNondirectHitMachType(address,
663 TBEs[address].LastResponder),
664 getCacheEntry(address).DataBlk,
665 TBEs[address].InitialRequestTime,
666 TBEs[address].ForwardRequestTime,
667 TBEs[address].FirstResponseTime);
669 getCacheEntry(address).Dirty := true;
// Allocate a TBE for address and snapshot the cache entry's data/dirty
// bits so a concurrent writeback can proceed after the entry is gone.
672 action(i_allocateTBE, "i", desc="Allocate TBE") {
673 check_allocate(TBEs);
674 TBEs.allocate(address);
675 TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
676 TBEs[address].Dirty := getCacheEntry(address).Dirty;
677 TBEs[address].Sharers := false;
// Queue-pop actions: consume the head message of each in_port after the
// transition that handled it.
680 action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
681 triggerQueue_in.dequeue();
684 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
685 mandatoryQueue_in.dequeue();
688 action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
689 forwardToCache_in.dequeue();
// Account for an incoming ack/data message: decrement the pending count
// by the message's Acks, remember the responder, and merge the
// profiling timestamps (asserting consistency when both sides have one;
// FirstResponseTime is latched only once).
692 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
693 peek(responseToCache_in, ResponseMsg) {
694 assert(in_msg.Acks > 0);
695 DEBUG_EXPR(TBEs[address].NumPendingMsgs);
696 TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
697 DEBUG_EXPR(TBEs[address].NumPendingMsgs);
698 TBEs[address].LastResponder := in_msg.Sender;
699 if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
700 assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
702 if (in_msg.InitialRequestTime != zero_time()) {
703 TBEs[address].InitialRequestTime := in_msg.InitialRequestTime;
705 if (TBEs[address].ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
706 assert(TBEs[address].ForwardRequestTime == in_msg.ForwardRequestTime);
708 if (in_msg.ForwardRequestTime != zero_time()) {
709 TBEs[address].ForwardRequestTime := in_msg.ForwardRequestTime;
711 if (TBEs[address].FirstResponseTime == zero_time()) {
712 TBEs[address].FirstResponseTime := get_time();
// Record the data's sender as the block's current owner, later reported
// to the directory in gs_sendUnblockS.
716 action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
717 peek(responseToCache_in, ResponseMsg) {
718 TBEs[address].CurOwner := in_msg.Sender;
// Consume the head response message.
722 action(n_popResponseQueue, "n", desc="Pop response queue") {
723 responseToCache_in.dequeue();
// Schedule a self-trigger modelling the L2 hit latency; its arrival
// fires Complete_L2_to_L1.
726 action(ll_L2toL1Transfer, "ll", desc="") {
727 enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
728 out_msg.Address := address;
729 out_msg.Type := TriggerType:L2_to_L1;
// If all expected responses have arrived, fire ALL_ACKS (sharers seen)
// or ALL_ACKS_NO_SHARERS (we are the only copy) via the trigger queue.
733 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
734 if (TBEs[address].NumPendingMsgs == 0) {
735 enqueue(triggerQueue_out, TriggerMsg) {
736 out_msg.Address := address;
737 if (TBEs[address].Sharers) {
738 out_msg.Type := TriggerType:ALL_ACKS;
// NOTE(review): an "} else {" (original line 739) appears to be missing
// from this excerpt — the NO_SHARERS type below is the else branch.
740 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
// Manual +/-1 adjustments of the pending-message counter.
746 action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
747 TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
750 action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
751 TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
// Service a forwarded request from TBE data (used while the cache entry
// is already gone, e.g. during an in-flight writeback).
754 action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
755 peek(forwardToCache_in, RequestMsg) {
756 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
757 out_msg.Address := address;
758 out_msg.Type := CoherenceResponseType:DATA;
759 out_msg.Sender := machineID;
760 out_msg.Destination.add(in_msg.Requestor);
761 DEBUG_EXPR(out_msg.Destination);
762 out_msg.DataBlk := TBEs[address].DataBlk;
763 out_msg.Dirty := TBEs[address].Dirty;
764 if (in_msg.DirectedProbe) {
765 out_msg.Acks := machineCount(MachineType:L1Cache);
// NOTE(review): the else branch (original lines 766-768) is missing
// from this excerpt.
769 out_msg.MessageSize := MessageSizeType:Response_Data;
770 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
771 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Multicast variant for MERGED_GETS, mirroring em_sendDataSharedMultiple
// but sourcing data from the TBE.
776 action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
777 peek(forwardToCache_in, RequestMsg) {
778 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
779 out_msg.Address := address;
780 out_msg.Type := CoherenceResponseType:DATA;
781 out_msg.Sender := machineID;
782 out_msg.Destination := in_msg.MergedRequestors;
783 DEBUG_EXPR(out_msg.Destination);
784 out_msg.DataBlk := TBEs[address].DataBlk;
785 out_msg.Dirty := TBEs[address].Dirty;
786 out_msg.Acks := machineCount(MachineType:L1Cache);
787 out_msg.MessageSize := MessageSizeType:Response_Data;
788 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
789 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Complete a (non-exclusive) writeback: WB_DIRTY carries data; WB_CLEAN
// would not in a real system, but data is sent anyway so memory can
// verify it (see NOTE below).
794 action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
795 enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
796 out_msg.Address := address;
797 out_msg.Sender := machineID;
798 out_msg.Destination.add(map_Address_to_Directory(address));
799 out_msg.Dirty := TBEs[address].Dirty;
800 if (TBEs[address].Dirty) {
801 out_msg.Type := CoherenceResponseType:WB_DIRTY;
802 out_msg.DataBlk := TBEs[address].DataBlk;
803 out_msg.MessageSize := MessageSizeType:Writeback_Data;
// NOTE(review): an "} else {" (original line 804) appears to be missing
// from this excerpt — the clean case below is the else branch.
805 out_msg.Type := CoherenceResponseType:WB_CLEAN;
806 // NOTE: in a real system this would not send data. We send
807 // data here only so we can check it at the memory
808 out_msg.DataBlk := TBEs[address].DataBlk;
809 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Record that at least one other processor holds a shared copy; checked
// by o_checkForCompletion to pick ALL_ACKS vs ALL_ACKS_NO_SHARERS.
814 action(r_setSharerBit, "r", desc="We saw other sharers") {
815 TBEs[address].Sharers := true;
// Free the transaction buffer entry once the transaction is done.
818 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
819 TBEs.deallocate(address);
// Complete an exclusive writeback (M/E line), mirroring
// qq_sendDataFromTBEToMemory with the WB_EXCLUSIVE_* message types.
822 action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
823 enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
824 out_msg.Address := address;
825 out_msg.Sender := machineID;
826 out_msg.Destination.add(map_Address_to_Directory(address));
827 out_msg.DataBlk := TBEs[address].DataBlk;
828 out_msg.Dirty := TBEs[address].Dirty;
829 if (TBEs[address].Dirty) {
830 out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
831 out_msg.DataBlk := TBEs[address].DataBlk;
832 out_msg.MessageSize := MessageSizeType:Writeback_Data;
// NOTE(review): an "} else {" (original line 833) appears to be missing
// from this excerpt — the clean case below is the else branch.
834 out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
835 // NOTE: in a real system this would not send data. We send
836 // data here only so we can check it at the memory
837 out_msg.DataBlk := TBEs[address].DataBlk;
838 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Install the data/dirty bits from the response currently being peeked.
843 action(u_writeDataToCache, "u", desc="Write data to cache") {
844 peek(responseToCache_in, ResponseMsg) {
845 getCacheEntry(address).DataBlk := in_msg.DataBlk;
846 getCacheEntry(address).Dirty := in_msg.Dirty;
// Same, but first assert the incoming data matches what we already hold
// (used when we still had a valid copy); dirty bits are OR-ed.
850 action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
851 peek(responseToCache_in, ResponseMsg) {
852 DEBUG_EXPR(getCacheEntry(address).DataBlk);
853 DEBUG_EXPR(in_msg.DataBlk);
854 assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
855 getCacheEntry(address).DataBlk := in_msg.DataBlk;
856 getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty;
// Deallocate the L1 copy (D-side if present, otherwise I-side) so a
// replacement can overlap with an outstanding fetch.
860 action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
861 if (L1DcacheMemory.isTagPresent(address)) {
862 L1DcacheMemory.deallocate(address);
// NOTE(review): an "} else {" (original line 863) appears to be missing
// from this excerpt — the I-cache deallocate below is the else branch.
864 L1IcacheMemory.deallocate(address);
// Allocate a fresh entry in the L1 D-cache unless the tag already exists.
868 action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
869 if (L1DcacheMemory.isTagPresent(address) == false) {
870 L1DcacheMemory.allocate(address, new Entry);
// Allocate a fresh entry in the L1 I-cache unless the tag already exists.
874 action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
875 if (L1IcacheMemory.isTagPresent(address) == false) {
876 L1IcacheMemory.allocate(address, new Entry);
// Unconditionally allocate / deallocate the L2 entry (the mandatory
// queue logic guarantees space was made beforehand).
880 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
881 L2cacheMemory.allocate(address, new Entry);
884 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
885 L2cacheMemory.deallocate(address);
// Copy Dirty and DataBlk from the L1 entry (D-cache if it holds the tag,
// otherwise I-cache) into the already-allocated L2 entry.
// NOTE(review): the `} else {` line between the two copy pairs (orig.
// line 892) is missing from this extract.
888 action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
889 if (L1DcacheMemory.isTagPresent(address)) {
890 static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty;
891 static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk;
893 static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty;
894 static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk;
// Copy Dirty and DataBlk from the L2 entry into the L1 entry (D-cache if
// it holds the tag, otherwise I-cache), and mark the L1 entry FromL2 so
// later logic can tell the block was filled from L2 rather than from the
// network.  NOTE(review): the `} else {` line (orig. line 903) is missing
// from this extract.
898 action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
899 if (L1DcacheMemory.isTagPresent(address)) {
900 static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
901 static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
902 static_cast(Entry, L1DcacheMemory[address]).FromL2 := true;
904 static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
905 static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
906 static_cast(Entry, L1IcacheMemory[address]).FromL2 := true;
// Record a demand-miss statistic against whichever L1 currently holds the
// tag (I-cache checked first, then D-cache), and independently record an
// L2 miss when the L2 does NOT hold the tag.  Uses the request at the head
// of the mandatory queue as the profiled message.
910 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
911 peek(mandatoryQueue_in, CacheMsg) {
912 if (L1IcacheMemory.isTagPresent(address)) {
913 L1IcacheMemory.profileMiss(in_msg);
914 } else if (L1DcacheMemory.isTagPresent(address)) {
915 L1DcacheMemory.profileMiss(in_msg);
// L2 profiling is a separate check: a block present in L1 may still miss in L2.
917 if (L2cacheMemory.isTagPresent(address) == false) {
918 L2cacheMemory.profileMiss(in_msg);
// Rotate the head request of the mandatory queue to the back so other
// requests can make progress while this address is in a transient state.
923 action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
924 mandatoryQueue_in.recycle();
927 //*****************************************************
// TRANSITIONS
929 //*****************************************************
931 // Transitions for Load/Store/L2_Replacement from transient states
// While a request is outstanding (transient states), new demand requests
// and replacements for the same block are recycled (retried later) rather
// than processed, leaving the machine state unchanged.
932 transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
933 zz_recycleMandatoryQueue;
936 transition({M_W, MM_W}, {L2_Replacement}) {
937 zz_recycleMandatoryQueue;
940 transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
941 zz_recycleMandatoryQueue;
944 transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
945 zz_recycleMandatoryQueue;
// External requests arriving during an L2-to-L1 move (IT/ST/OT/MT/MMT).
// NOTE(review): this transition's action list (orig. lines 949-950) is not
// visible in this extract.
948 transition({IT, ST, OT, MT, MMT}, {Other_GETX, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
952 // Transitions moving data between the L1 and L2 caches
// Demotion: move a block from L1 down to L2 without changing its
// coherence state — allocate in L2, copy, then free the L1 entry.
953 transition({I, S, O, M, MM}, L1_to_L2) {
954 vv_allocateL2CacheBlock;
955 ss_copyFromL1toL2; // Not really needed for state I
956 gg_deallocateL1CacheBlock;
// Promotion into the L1 D-cache: each stable state X moves through a
// matching transient state (I->IT, S->ST, O->OT, M->MT, MM->MMT) while the
// block is copied up from L2; the triggering demand request is recycled
// and re-handled once the move completes.
959 transition(I, Trigger_L2_to_L1D, IT) {
960 ii_allocateL1DCacheBlock;
961 tt_copyFromL2toL1; // Not really needed for state I
963 rr_deallocateL2CacheBlock;
964 zz_recycleMandatoryQueue;
968 transition(S, Trigger_L2_to_L1D, ST) {
969 ii_allocateL1DCacheBlock;
972 rr_deallocateL2CacheBlock;
973 zz_recycleMandatoryQueue;
977 transition(O, Trigger_L2_to_L1D, OT) {
978 ii_allocateL1DCacheBlock;
981 rr_deallocateL2CacheBlock;
982 zz_recycleMandatoryQueue;
986 transition(M, Trigger_L2_to_L1D, MT) {
987 ii_allocateL1DCacheBlock;
990 rr_deallocateL2CacheBlock;
991 zz_recycleMandatoryQueue;
995 transition(MM, Trigger_L2_to_L1D, MMT) {
996 ii_allocateL1DCacheBlock;
999 rr_deallocateL2CacheBlock;
1000 zz_recycleMandatoryQueue;
// Promotion into the L1 I-cache: same pattern as the D-cache transitions
// above, but allocating in the I-cache.
1004 transition(I, Trigger_L2_to_L1I, IT) {
1005 jj_allocateL1ICacheBlock;
1006 tt_copyFromL2toL1; // Not really needed for state I
1008 rr_deallocateL2CacheBlock;
1009 zz_recycleMandatoryQueue;
1013 transition(S, Trigger_L2_to_L1I, ST) {
1014 jj_allocateL1ICacheBlock;
1017 rr_deallocateL2CacheBlock;
1018 zz_recycleMandatoryQueue;
1022 transition(O, Trigger_L2_to_L1I, OT) {
1023 jj_allocateL1ICacheBlock;
1026 rr_deallocateL2CacheBlock;
1027 zz_recycleMandatoryQueue;
1031 transition(M, Trigger_L2_to_L1I, MT) {
1032 jj_allocateL1ICacheBlock;
1035 rr_deallocateL2CacheBlock;
1036 zz_recycleMandatoryQueue;
1040 transition(MM, Trigger_L2_to_L1I, MMT) {
1041 jj_allocateL1ICacheBlock;
// Completion: leave the transient move state and restore the original
// stable state.  NOTE(review): the action lists of these five transitions
// are not visible in this extract.
1044 rr_deallocateL2CacheBlock;
1045 zz_recycleMandatoryQueue;
1049 transition(IT, Complete_L2_to_L1, I) {
1053 transition(ST, Complete_L2_to_L1, S) {
1057 transition(OT, Complete_L2_to_L1, O) {
1061 transition(MT, Complete_L2_to_L1, M) {
1065 transition(MMT, Complete_L2_to_L1, MM) {
// Demand and external-request transitions from the stable MOESI states.
// NOTE(review): many action lines inside these transitions were dropped by
// this extract (gaps in the original line numbering); only the surviving
// lines are shown.
1069 // Transitions from Idle
// Misses from I allocate an L1 entry, issue the request, and move to the
// appropriate transient state (IS for loads/ifetches, IM for stores).
1070 transition(I, Load, IS) {
1071 ii_allocateL1DCacheBlock;
1075 k_popMandatoryQueue;
1078 transition(I, Ifetch, IS) {
1079 jj_allocateL1ICacheBlock;
1083 k_popMandatoryQueue;
1086 transition(I, Store, IM) {
1087 ii_allocateL1DCacheBlock;
1091 k_popMandatoryQueue;
1094 transition(I, L2_Replacement) {
1095 rr_deallocateL2CacheBlock;
// In I there is nothing to supply or invalidate for external requests.
1098 transition(I, {Other_GETX, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1103 // Transitions from Shared
1104 transition({S, SM, ISM}, {Load, Ifetch}) {
1106 k_popMandatoryQueue;
1109 transition(S, Store, SM) {
1113 k_popMandatoryQueue;
1116 transition(S, L2_Replacement, I) {
1117 rr_deallocateL2CacheBlock;
// A shared copy is silently dropped on an external GETX/Invalidate.
1120 transition(S, {Other_GETX, Invalidate}, I) {
1125 transition(S, {Other_GETS, Other_GETS_No_Mig}) {
1130 // Transitions from Owned
1131 transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
1133 k_popMandatoryQueue;
1136 transition(O, Store, OM) {
1139 p_decrementNumberOfMessagesByOne;
1141 k_popMandatoryQueue;
// Owner eviction must write the (possibly dirty) data back: go through OI.
1144 transition(O, L2_Replacement, OI) {
1147 rr_deallocateL2CacheBlock;
1150 transition(O, {Other_GETX, Invalidate}, I) {
1155 transition(O, {Other_GETS, Other_GETS_No_Mig}) {
// Merged_GETS: supply shared data to multiple requestors at once.
1160 transition(O, Merged_GETS) {
1161 em_sendDataSharedMultiple;
1165 // Transitions from Modified
1166 transition(MM, {Load, Ifetch}) {
1168 k_popMandatoryQueue;
1171 transition(MM, Store) {
1173 k_popMandatoryQueue;
// Eviction of modified data goes through MI for the writeback handshake.
1176 transition(MM, L2_Replacement, MI) {
1179 rr_deallocateL2CacheBlock;
1182 transition(MM, {Other_GETX, Invalidate}, I) {
1183 c_sendExclusiveData;
// Plain Other_GETS from MM migrates ownership (go to I); the No_Mig
// variant keeps this cache as owner (go to O).
1187 transition(MM, Other_GETS, I) {
1188 c_sendExclusiveData;
1192 transition(MM, Other_GETS_No_Mig, O) {
1197 transition(MM, Merged_GETS, O) {
1198 em_sendDataSharedMultiple;
1202 // Transitions from Dirty Exclusive
1203 transition(M, {Load, Ifetch}) {
1205 k_popMandatoryQueue;
// A store upgrades M to MM without any external traffic shown here.
1208 transition(M, Store, MM) {
1210 k_popMandatoryQueue;
1213 transition(M, L2_Replacement, MI) {
1216 rr_deallocateL2CacheBlock;
1219 transition(M, {Other_GETX, Invalidate}, I) {
1220 c_sendExclusiveData;
1224 transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
1229 transition(M, Merged_GETS, O) {
1230 em_sendDataSharedMultiple;
// Transient write-request states: IM (store miss from I), SM (upgrade
// from S), ISM (got data, waiting for remaining acks), OM (upgrade from
// O).  Each decrements the outstanding-message count on every Ack/Data
// and checks whether the request is complete.
// NOTE(review): action lines were dropped from several of these
// transitions by this extract.
1234 // Transitions from IM
1236 transition(IM, {Other_GETX, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1241 transition(IM, Ack) {
1242 m_decrementNumberOfMessages;
1243 o_checkForCompletion;
// Data with sharers still to ack: move to ISM and keep counting.
1247 transition(IM, Data, ISM) {
1249 m_decrementNumberOfMessages;
1250 o_checkForCompletion;
// Exclusive data: no other sharers, store can complete (MM_W waits out
// the timeout window).
1254 transition(IM, Exclusive_Data, MM_W) {
1256 m_decrementNumberOfMessages;
1257 o_checkForCompletion;
1258 sx_external_store_hit;
1262 // Transitions from SM
1263 transition(SM, {Other_GETS, Other_GETS_No_Mig}) {
// Losing the race: an external GETX invalidates our shared copy, so the
// upgrade falls back to a full miss (IM).
1268 transition(SM, {Other_GETX, Invalidate}, IM) {
1273 transition(SM, Ack) {
1274 m_decrementNumberOfMessages;
1275 o_checkForCompletion;
// Data received in SM should match the shared copy we already hold —
// verified by v_writeDataToCacheVerify.
1279 transition(SM, Data, ISM) {
1280 v_writeDataToCacheVerify;
1281 m_decrementNumberOfMessages;
1282 o_checkForCompletion;
1286 // Transitions from ISM
1287 transition(ISM, Ack) {
1288 m_decrementNumberOfMessages;
1289 o_checkForCompletion;
1293 transition(ISM, All_acks_no_sharers, MM) {
1294 sxt_trig_ext_store_hit;
1300 // Transitions from OM
// Losing ownership mid-upgrade: one more message (the data we must now
// re-fetch) is expected, hence the increment.
1302 transition(OM, {Other_GETX, Invalidate}, IM) {
1304 pp_incrementNumberOfMessagesByOne;
1308 transition(OM, {Other_GETS, Other_GETS_No_Mig}) {
1313 transition(OM, Merged_GETS) {
1314 em_sendDataSharedMultiple;
1318 transition(OM, Ack) {
1319 m_decrementNumberOfMessages;
1320 o_checkForCompletion;
1324 transition(OM, {All_acks, All_acks_no_sharers}, MM) {
1325 sxt_trig_ext_store_hit;
// Transient read-request states: IS (load/ifetch miss outstanding) and
// SS (shared data received, still collecting acks).
// NOTE(review): action lines were dropped from several of these
// transitions by this extract.
1331 // Transitions from IS
1333 transition(IS, {Other_GETX, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1338 transition(IS, Ack) {
1339 m_decrementNumberOfMessages;
1340 o_checkForCompletion;
1344 transition(IS, Shared_Ack) {
1345 m_decrementNumberOfMessages;
1347 o_checkForCompletion;
// Data arrives: complete the external load hit and remember who owned
// the block (uo_updateCurrentOwner).
1351 transition(IS, Data, SS) {
1353 m_decrementNumberOfMessages;
1354 o_checkForCompletion;
1355 hx_external_load_hit;
1356 uo_updateCurrentOwner;
// Exclusive data on a load: take the block exclusively (M after the M_W
// timeout window).
1360 transition(IS, Exclusive_Data, M_W) {
1362 m_decrementNumberOfMessages;
1363 o_checkForCompletion;
1364 hx_external_load_hit;
1368 transition(IS, Shared_Data, SS) {
1371 m_decrementNumberOfMessages;
1372 o_checkForCompletion;
1373 hx_external_load_hit;
1374 uo_updateCurrentOwner;
1378 // Transitions from SS
1380 transition(SS, Ack) {
1381 m_decrementNumberOfMessages;
1382 o_checkForCompletion;
1386 transition(SS, Shared_Ack) {
1387 m_decrementNumberOfMessages;
1389 o_checkForCompletion;
1393 transition(SS, All_acks, S) {
1399 transition(SS, All_acks_no_sharers, S) {
1400 // Note: The directory might still be the owner, so that is why we go to S
// Timeout-window states: MM_W / M_W hold the block exclusively while the
// store/load that created them is protected from immediate migration;
// remaining acks are still collected here.
// NOTE(review): action lines were dropped from several of these
// transitions by this extract.
1406 // Transitions from MM_W
1408 transition(MM_W, Store) {
1410 k_popMandatoryQueue;
1413 transition(MM_W, Ack) {
1414 m_decrementNumberOfMessages;
1415 o_checkForCompletion;
1419 transition(MM_W, All_acks_no_sharers, MM) {
1425 // Transitions from M_W
// A store during the window upgrades M_W to MM_W locally.
1427 transition(M_W, Store, MM_W) {
1429 k_popMandatoryQueue;
1432 transition(M_W, Ack) {
1433 m_decrementNumberOfMessages;
1434 o_checkForCompletion;
1438 transition(M_W, All_acks_no_sharers, M) {
// Writeback states: OI/MI are waiting for a Writeback_Ack after evicting
// an owned/modified block; II means the block was also invalidated while
// the writeback was in flight.  Data requests that race with the
// writeback are answered from the TBE, since the cache entry is gone.
// NOTE(review): action lines were dropped from several of these
// transitions by this extract.
1444 // Transitions from OI/MI
1446 transition({OI, MI}, {Other_GETX, Invalidate}, II) {
1447 q_sendDataFromTBEToCache;
// Answering a GETS demotes MI to OI (we become a mere owner of stale-at-
// home data for the remainder of the writeback).
1451 transition({OI, MI}, {Other_GETS, Other_GETS_No_Mig}, OI) {
1452 q_sendDataFromTBEToCache;
1456 transition({OI, MI}, Merged_GETS, OI) {
1457 qm_sendDataFromTBEToCache;
// MI sends exclusive (clean-at-home) data to memory; OI sends owned data.
1461 transition(MI, Writeback_Ack, I) {
1462 t_sendExclusiveDataFromTBEToMemory;
1467 transition(OI, Writeback_Ack, I) {
1468 qq_sendDataFromTBEToMemory;
1473 // Transitions from II
1474 transition(II, {Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
1479 transition(II, Writeback_Ack, I) {
1485 transition(II, Writeback_Nack, I) {