3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// SLICC controller: one bank of the shared L2 cache for the MOESI token
// protocol.  Parameters: the backing CacheMemory, request/response latencies,
// and a flag enabling filtering of forwarded transient requests.
// NOTE(review): this excerpt elides some original lines, so the parameter
// and buffer lists below may be incomplete.
35 machine(L2Cache, "Token protocol")
36 : CacheMemory * L2cacheMemory,
38 Cycles l2_request_latency = 5,
39 Cycles l2_response_latency = 5,
40 bool filtering_enabled = true
44 // From local bank of L2 cache TO the network
46 // this L2 bank -> a local L1 || mod-directory
47 MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
48 // this L2 bank -> mod-directory
49 MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
50 // this L2 bank -> a local L1
51 MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
54 // FROM the network to this local bank of L2 cache
56 // a local L1 || mod-directory -> this L2 bank
57 MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
// Persistent requests must stay ordered so activate/deactivate pairs match up.
58 MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
59 // mod-directory -> this L2 bank
60 MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
61 // a local L1 -> this L2 bank
62 MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
// Stable states for an L2 bank entry.  In token coherence, state is implied
// by the token count: S-like states hold >=1 token, O/M-like states hold the
// owner token (more than half of max_tokens()).  The *_L states mean a
// persistent request from another node has locked the line at this bank.
65 state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
67 NP, AccessPermission:Invalid, desc="Not Present";
68 I, AccessPermission:Invalid, desc="Idle";
69 S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
70 O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
71 M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
// Locked variants: a remote persistent request owns priority for this line.
74 I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
75 S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
// Events driving the L2 bank's transitions.  Grouped by origin: local L1
// requests, transient (remote) requests, L2-initiated replacements, L1
// writebacks, external responses, and persistent (starvation-avoidance)
// requests.  Several desc strings below were copy-paste duplicates in the
// original; they are disambiguated here so profiler/trace output is useful.
79 enumeration(Event, desc="Cache events") {
82 L1_GETS, desc="local L1 GETS request";
83 L1_GETS_Last_Token, desc="local L1 GETS request for our last (or last non-owner) token";
84 L1_GETX, desc="local L1 GETX request";
85 L1_INV, desc="L1 no longer has tokens";
86 Transient_GETX, desc="A GetX from another processor";
87 Transient_GETS, desc="A GetS from another processor";
88 Transient_GETS_Last_Token, desc="A GetS from another processor, we hold the last token";
90 // events initiated by this L2
91 L2_Replacement, desc="L2 Replacement", format="!r";
93 // events of external L2 responses
96 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
97 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
98 Writeback_All_Tokens, desc="Received a writeback from L1 that completes all tokens";
99 Writeback_Owned, desc="Received a writeback from L1 with owned data";
102 Data_Shared, desc="Received a data message, we are now a sharer";
103 Data_Owner, desc="Received a data message, we are now the owner";
104 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
105 Ack, desc="Received an ack message";
106 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
109 Persistent_GETX, desc="Another processor has priority to read/write";
110 Persistent_GETS, desc="Another processor has priority to read";
111 Persistent_GETS_Last_Token, desc="Another processor has priority to read, we hold the last token";
112 Own_Lock_or_Unlock, desc="This processor now has priority";
// Per-line L2 cache entry: coherence state, dirty bit, held token count,
// and the data block itself.
118 structure(Entry, desc="...", interface="AbstractCacheEntry") {
119 State CacheState, desc="cache state";
120 bool Dirty, desc="Is the data dirty (different than memory)?";
121 int Tokens, desc="The number of tokens we're holding for the line";
122 DataBlk DataBlk, desc="data for the block";
125 structure(DirEntry, desc="...") {
126 Set Sharers, desc="Set of the internal processors that want the block in shared state";
127 bool exclusive, default="false", desc="if local exclusive is likely";
// External (C++-implemented) helper structures used by this controller.
130 structure(PerfectCacheMemory, external="yes") {
131 void allocate(Address);
132 void deallocate(Address);
133 DirEntry lookup(Address);
134 bool isTagPresent(Address);
// Table of outstanding persistent (starvation-avoidance) requests; the
// "smallest" requestor ID wins priority for a locked address.
137 structure(PersistentTable, external="yes") {
138 void persistentRequestLock(Address, MachineID, AccessType);
139 void persistentRequestUnlock(Address, MachineID);
140 MachineID findSmallest(Address);
141 AccessType typeOfSmallest(Address);
142 void markEntries(Address);
143 bool isLocked(Address);
144 int countStarvingForAddress(Address);
145 int countReadStarvingForAddress(Address);
// Controller-local state: the persistent-request table and an (approximate)
// directory of which local L1s share / exclusively hold each line.
148 PersistentTable persistentTable;
149 PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
151 void set_cache_entry(AbstractCacheEntry b);
152 void unset_cache_entry();
// Look up the L2 entry for an address; returns an invalid pointer when the
// line is not cached here.
154 Entry getCacheEntry(Address address), return_by_pointer="yes" {
155 Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
// Functional access to the data block (assumes the entry is present).
159 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
160 return getCacheEntry(addr).DataBlk;
// Token count helper: valid entries report their count; the elided fall-through
// presumably returns 0 for missing entries -- TODO confirm against full source.
163 int getTokens(Entry cache_entry) {
164 if (is_valid(cache_entry)) {
165 return cache_entry.Tokens;
// Derive the current state: cached entries carry their own state; otherwise a
// persistently locked line reports a locked state (else-branch elided here).
171 State getState(Entry cache_entry, Address addr) {
172 if (is_valid(cache_entry)) {
173 return cache_entry.CacheState;
174 } else if (persistentTable.isLocked(addr) == true) {
// Transition the entry to a new state, first asserting the token-count
// invariants that each state class implies.
181 void setState(Entry cache_entry, Address addr, State state) {
183 if (is_valid(cache_entry)) {
184 // Make sure the token count is in range
185 assert(cache_entry.Tokens >= 0);
186 assert(cache_entry.Tokens <= max_tokens());
// Exactly half the tokens is never legal: ownership needs a strict majority.
187 assert(cache_entry.Tokens != (max_tokens() / 2));
189 // Make sure we have no tokens in L
190 if ((state == State:I_L) ) {
191 assert(cache_entry.Tokens == 0);
194 // in M and E you have all the tokens
195 if (state == State:M ) {
196 assert(cache_entry.Tokens == max_tokens());
199 // in NP you have no tokens
200 if (state == State:NP) {
201 assert(cache_entry.Tokens == 0);
204 // You have at least one token in S-like states
205 if (state == State:S ) {
206 assert(cache_entry.Tokens > 0);
209 // You have at least half the token in O-like states
210 if (state == State:O ) {
211 assert(cache_entry.Tokens > (max_tokens() / 2));
214 cache_entry.CacheState := state;
// Map an address's cache state to an access permission for functional
// accesses; lines not present report NotPresent.
218 AccessPermission getAccessPermission(Address addr) {
219 Entry cache_entry := getCacheEntry(addr);
220 if(is_valid(cache_entry)) {
221 return L2Cache_State_to_permission(cache_entry.CacheState);
224 return AccessPermission:NotPresent;
// Keep the entry's permission bits in sync when its state changes.
227 void setAccessPermission(Entry cache_entry, Address addr, State state) {
228 if (is_valid(cache_entry)) {
229 cache_entry.changePermission(L2Cache_State_to_permission(state));
// Drop a local L1 from the sharer set; free the directory entry once the
// last sharer is gone.
233 void removeSharer(Address addr, NodeID id) {
235 if (localDirectory.isTagPresent(addr)) {
236 localDirectory[addr].Sharers.remove(id);
237 if (localDirectory[addr].Sharers.count() == 0) {
238 localDirectory.deallocate(addr);
// True if any local L1 is recorded as sharing the line (elided else-branches
// presumably return false -- TODO confirm against full source).
243 bool sharersExist(Address addr) {
244 if (localDirectory.isTagPresent(addr)) {
245 if (localDirectory[addr].Sharers.count() > 0) {
// True if a local L1 likely holds the line exclusively.
257 bool exclusiveExists(Address addr) {
258 if (localDirectory.isTagPresent(addr)) {
259 if (localDirectory[addr].exclusive == true) {
271 // assumes that caller will check to make sure tag is present
272 Set getSharers(Address addr) {
273 return localDirectory[addr].Sharers;
// Record a new exclusive writer: the sharer set collapses to just this L1
// and the exclusive hint is set.
276 void setNewWriter(Address addr, NodeID id) {
277 if (localDirectory.isTagPresent(addr) == false) {
278 localDirectory.allocate(addr);
280 localDirectory[addr].Sharers.clear();
281 localDirectory[addr].Sharers.add(id);
282 localDirectory[addr].exclusive := true;
// Add a reader to the sharer set (exclusive hint deliberately left alone).
285 void addNewSharer(Address addr, NodeID id) {
286 if (localDirectory.isTagPresent(addr) == false) {
287 localDirectory.allocate(addr);
289 localDirectory[addr].Sharers.add(id);
290 // localDirectory[addr].exclusive := false;
// Clear the exclusive hint if we are tracking this line at all.
293 void clearExclusiveBitIfExists(Address addr) {
294 if (localDirectory.isTagPresent(addr) == true) {
295 localDirectory[addr].exclusive := false;
// Translate a coherence request type to the generic type used for cache
// profiling; anything but GETS/GETX is a protocol error.
299 GenericRequestType convertToGenericType(CoherenceRequestType type) {
300 if(type == CoherenceRequestType:GETS) {
301 return GenericRequestType:GETS;
302 } else if(type == CoherenceRequestType:GETX) {
303 return GenericRequestType:GETX;
305 DPRINTF(RubySlicc, "%s\n", type);
306 error("invalid CoherenceRequestType");
// Outgoing ports: requests to the directory, forwarded requests to local
// L1s, and responses to either.
311 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
312 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
313 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
319 // Persistent Network
// Persistent (starvation-avoidance) requests: first update the persistent
// table's lock state, then trigger an event based on who now has priority
// and how many tokens we hold.
320 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
321 if (persistentNetwork_in.isReady()) {
322 peek(persistentNetwork_in, PersistentMsg) {
323 assert(in_msg.Destination.isElement(machineID));
325 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
326 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
327 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
328 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
329 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
330 persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
332 error("Unexpected message");
335 Entry cache_entry := getCacheEntry(in_msg.Address);
336 // React to the message based on the current state of the table
337 if (persistentTable.isLocked(in_msg.Address)) {
339 if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
// A reader only needs one token; "last token" also covers holding exactly
// the owner majority plus one (we could give away all but the owner share).
340 if (getTokens(cache_entry) == 1 ||
341 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
342 trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
345 trigger(Event:Persistent_GETS, in_msg.Address, cache_entry);
348 trigger(Event:Persistent_GETX, in_msg.Address, cache_entry);
// No one else is locked on the line: we (re)gain priority.
352 trigger(Event:Own_Lock_or_Unlock, in_msg.Address, cache_entry);
// Transient requests arriving from the global network (other processors via
// the directory).  GETS on our last token is a distinct event because handing
// it over would leave us with nothing.
360 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
361 if (requestNetwork_in.isReady()) {
362 peek(requestNetwork_in, RequestMsg) {
363 assert(in_msg.Destination.isElement(machineID));
365 Entry cache_entry := getCacheEntry(in_msg.Address);
366 if (in_msg.Type == CoherenceRequestType:GETX) {
367 trigger(Event:Transient_GETX, in_msg.Address, cache_entry);
368 } else if (in_msg.Type == CoherenceRequestType:GETS) {
369 if (getTokens(cache_entry) == 1) {
370 trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
374 trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
377 error("Unexpected message");
// Requests from local L1 caches.  As with persistent GETS, "last token"
// also covers holding exactly the owner majority plus one token.
383 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
384 if (L1requestNetwork_in.isReady()) {
385 peek(L1requestNetwork_in, RequestMsg) {
386 assert(in_msg.Destination.isElement(machineID));
387 Entry cache_entry := getCacheEntry(in_msg.Address);
388 if (in_msg.Type == CoherenceRequestType:GETX) {
389 trigger(Event:L1_GETX, in_msg.Address, cache_entry);
390 } else if (in_msg.Type == CoherenceRequestType:GETS) {
391 if (getTokens(cache_entry) == 1 ||
392 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
393 trigger(Event:L1_GETS_Last_Token, in_msg.Address, cache_entry);
396 trigger(Event:L1_GETS, in_msg.Address, cache_entry);
399 error("Unexpected message");
// Responses (data, acks, L1 writebacks, invalidation notices).  The port is
// split into two arms: the first handles messages that do NOT complete our
// token set, the second (elided header around original line 450) handles
// messages whose tokens bring us to max_tokens() and so trigger the
// *_All_Tokens events.  Writebacks may require an L2 victim eviction first.
407 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
408 if (responseNetwork_in.isReady()) {
409 peek(responseNetwork_in, ResponseMsg) {
410 assert(in_msg.Destination.isElement(machineID));
411 Entry cache_entry := getCacheEntry(in_msg.Address);
// Arm 1: incoming tokens leave us short of the full set.
413 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
414 if (in_msg.Type == CoherenceResponseType:ACK) {
// Plain acks can never carry the owner token (a strict majority).
415 assert(in_msg.Tokens < (max_tokens() / 2));
416 trigger(Event:Ack, in_msg.Address, cache_entry);
417 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
418 trigger(Event:Data_Owner, in_msg.Address, cache_entry);
419 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
420 trigger(Event:Data_Shared, in_msg.Address, cache_entry);
421 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
422 in_msg.Type == CoherenceResponseType:WB_OWNED ||
423 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
425 if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
427 // either room is available or the block is already present
429 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
430 assert(in_msg.Dirty == false);
431 trigger(Event:Writeback_Tokens, in_msg.Address, cache_entry);
432 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
433 assert(in_msg.Dirty == false);
434 trigger(Event:Writeback_Shared_Data, in_msg.Address, cache_entry);
436 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
437 //assert(in_msg.Dirty == false);
438 trigger(Event:Writeback_Owned, in_msg.Address, cache_entry);
// No room: evict the probe-selected victim before accepting the writeback.
442 trigger(Event:L2_Replacement,
443 L2cacheMemory.cacheProbe(in_msg.Address),
444 getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
446 } else if (in_msg.Type == CoherenceResponseType:INV) {
447 trigger(Event:L1_INV, in_msg.Address, cache_entry);
449 error("Unexpected message");
// Arm 2: this message completes the full token set for the line.
452 if (in_msg.Type == CoherenceResponseType:ACK) {
453 assert(in_msg.Tokens < (max_tokens() / 2));
454 trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry);
455 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
456 in_msg.Type == CoherenceResponseType:DATA_SHARED) {
457 trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry);
458 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
459 in_msg.Type == CoherenceResponseType:WB_OWNED ||
460 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
461 if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
463 // either room is available or the block is already present
465 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
466 assert(in_msg.Dirty == false);
// A token-only writeback can only complete the set if we already track the line.
467 assert( (getState(cache_entry, in_msg.Address) != State:NP)
468 && (getState(cache_entry, in_msg.Address) != State:I) );
469 trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
470 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
471 assert(in_msg.Dirty == false);
472 trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
474 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
475 trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
479 trigger(Event:L2_Replacement,
480 L2cacheMemory.cacheProbe(in_msg.Address),
481 getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
483 } else if (in_msg.Type == CoherenceResponseType:INV) {
484 trigger(Event:L1_INV, in_msg.Address, cache_entry);
486 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
487 error("Unexpected message");
// Forward a local L1 request to the directory as a global request,
// preserving the requestor, retry count, access mode and prefetch hints.
497 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
499 peek(L1requestNetwork_in, RequestMsg) {
501 // if this is a retry or no local sharers, broadcast normally
503 // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
504 enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
505 out_msg.Address := in_msg.Address;
506 out_msg.Type := in_msg.Type;
507 out_msg.Requestor := in_msg.Requestor;
508 out_msg.RetryNum := in_msg.RetryNum;
511 // If a statically shared L2 cache, then no other L2 caches can
514 //out_msg.Destination.broadcast(MachineType:L2Cache);
515 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
516 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
// Single-chip configuration: route straight to the home directory.
518 out_msg.Destination.add(map_Address_to_Directory(address));
519 out_msg.MessageSize := MessageSizeType:Request_Control;
520 out_msg.AccessMode := in_msg.AccessMode;
521 out_msg.Prefetch := in_msg.Prefetch;
525 //profile_filter_action(0);
// We cannot keep this response (e.g. line locked/evicted): bounce the
// tokens and data onward to memory, preserving all message fields.
530 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
531 peek(responseNetwork_in, ResponseMsg) {
532 // FIXME, should use a 3rd vnet
533 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
534 out_msg.Address := address;
535 out_msg.Type := in_msg.Type;
536 out_msg.Sender := machineID;
537 out_msg.Destination.add(map_Address_to_Directory(address));
538 out_msg.Tokens := in_msg.Tokens;
539 out_msg.MessageSize := in_msg.MessageSize;
540 out_msg.DataBlk := in_msg.DataBlk;
541 out_msg.Dirty := in_msg.Dirty;
// Evict a clean line: return any held tokens to the directory as a
// writeback-control ack (no data needed), then zero our count.
546 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
547 assert(is_valid(cache_entry));
548 if (cache_entry.Tokens > 0) {
549 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
550 out_msg.Address := address;
551 out_msg.Type := CoherenceResponseType:ACK;
552 out_msg.Sender := machineID;
553 out_msg.Destination.add(map_Address_to_Directory(address));
554 out_msg.Tokens := cache_entry.Tokens;
555 out_msg.MessageSize := MessageSizeType:Writeback_Control;
557 cache_entry.Tokens := 0;
// Evict an owned line: send tokens plus data if dirty (DATA_OWNER), or an
// owner-ack without data if clean (ACK_OWNER).
561 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
562 assert(is_valid(cache_entry));
563 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
564 out_msg.Address := address;
565 out_msg.Sender := machineID;
566 out_msg.Destination.add(map_Address_to_Directory(address));
567 out_msg.Tokens := cache_entry.Tokens;
568 out_msg.DataBlk := cache_entry.DataBlk;
569 out_msg.Dirty := cache_entry.Dirty;
571 if (cache_entry.Dirty) {
572 out_msg.MessageSize := MessageSizeType:Writeback_Data;
573 out_msg.Type := CoherenceResponseType:DATA_OWNER;
575 out_msg.MessageSize := MessageSizeType:Writeback_Control;
576 out_msg.Type := CoherenceResponseType:ACK_OWNER;
579 cache_entry.Tokens := 0;
// Satisfy a transient GETS: if we hold a comfortable surplus beyond the
// owner majority, give away N_tokens; otherwise give a single token
// (the second enqueue's token-count line is elided in this excerpt).
582 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
583 peek(requestNetwork_in, RequestMsg) {
584 assert(is_valid(cache_entry));
585 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
586 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
587 out_msg.Address := address;
588 out_msg.Type := CoherenceResponseType:DATA_SHARED;
589 out_msg.Sender := machineID;
590 out_msg.Destination.add(in_msg.Requestor);
591 out_msg.Tokens := N_tokens;
592 out_msg.DataBlk := cache_entry.DataBlk;
// Shared data is clean by definition.
593 out_msg.Dirty := false;
594 out_msg.MessageSize := MessageSizeType:Response_Data;
596 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
599 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
600 out_msg.Address := address;
601 out_msg.Type := CoherenceResponseType:DATA_SHARED;
602 out_msg.Sender := machineID;
603 out_msg.Destination.add(in_msg.Requestor);
605 out_msg.DataBlk := cache_entry.DataBlk;
606 out_msg.Dirty := false;
607 out_msg.MessageSize := MessageSizeType:Response_Data;
609 cache_entry.Tokens := cache_entry.Tokens - 1;
// Satisfy a transient GETX: hand over the data and every token we hold,
// transferring ownership to the requestor.
614 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
615 assert(is_valid(cache_entry));
616 peek(requestNetwork_in, RequestMsg) {
617 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
618 out_msg.Address := address;
619 out_msg.Type := CoherenceResponseType:DATA_OWNER;
620 out_msg.Sender := machineID;
621 out_msg.Destination.add(in_msg.Requestor);
622 assert(cache_entry.Tokens >= 1);
623 out_msg.Tokens := cache_entry.Tokens;
624 out_msg.DataBlk := cache_entry.DataBlk;
625 out_msg.Dirty := cache_entry.Dirty;
626 out_msg.MessageSize := MessageSizeType:Response_Data;
629 cache_entry.Tokens := 0;
// Persistent GETX from a starver while we hold tokens but no owner data:
// forward whatever tokens we have collected as a plain ack.
632 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
633 assert(is_valid(cache_entry));
634 if (cache_entry.Tokens > 0) {
635 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
636 out_msg.Address := address;
637 out_msg.Type := CoherenceResponseType:ACK;
638 out_msg.Sender := machineID;
639 out_msg.Destination.add(persistentTable.findSmallest(address));
640 assert(cache_entry.Tokens >= 1);
641 out_msg.Tokens := cache_entry.Tokens;
642 out_msg.MessageSize := MessageSizeType:Response_Control;
645 cache_entry.Tokens := 0;
// Persistent GETX from a starver while we own the line: send data plus
// every token to the highest-priority (smallest-ID) starving node.
648 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
649 assert(is_valid(cache_entry));
650 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
651 out_msg.Address := address;
652 out_msg.Type := CoherenceResponseType:DATA_OWNER;
653 out_msg.Sender := machineID;
654 out_msg.Destination.add(persistentTable.findSmallest(address));
655 assert(cache_entry.Tokens >= 1);
656 out_msg.Tokens := cache_entry.Tokens;
657 out_msg.DataBlk := cache_entry.DataBlk;
658 out_msg.Dirty := cache_entry.Dirty;
659 out_msg.MessageSize := MessageSizeType:Response_Data;
661 cache_entry.Tokens := 0;
// Persistent GETS from a starver: keep one token so we stay a sharer and
// ack the rest across.  No-op if we hold only a single token.
664 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
665 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
666 assert(is_valid(cache_entry));
667 assert(cache_entry.Tokens > 0);
668 if (cache_entry.Tokens > 1) {
669 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
670 out_msg.Address := address;
671 out_msg.Type := CoherenceResponseType:ACK;
672 out_msg.Sender := machineID;
673 out_msg.Destination.add(persistentTable.findSmallest(address));
674 assert(cache_entry.Tokens >= 1);
675 out_msg.Tokens := cache_entry.Tokens - 1;
676 out_msg.MessageSize := MessageSizeType:Response_Control;
679 cache_entry.Tokens := 1;
// Persistent GETS from a starver while we own the line with a surplus:
// transfer ownership (data + all tokens but one) and keep a single token
// so we remain a sharer.  Requires more than the bare owner majority.
// Fixed desc typo: "Send data and out tokens but one" -> "all our tokens but one".
682 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
683 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
684 assert(is_valid(cache_entry));
685 assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
686 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
687 out_msg.Address := address;
688 out_msg.Type := CoherenceResponseType:DATA_OWNER;
689 out_msg.Sender := machineID;
690 out_msg.Destination.add(persistentTable.findSmallest(address));
691 out_msg.Tokens := cache_entry.Tokens - 1;
692 out_msg.DataBlk := cache_entry.DataBlk;
693 out_msg.Dirty := cache_entry.Dirty;
694 out_msg.MessageSize := MessageSizeType:Response_Data;
696 cache_entry.Tokens := 1;
// Persistent GETS while we hold exactly the owner majority plus one token
// (the "last token" case): we cannot keep a sharer token without breaking
// the majority invariant, so hand over data and ALL tokens.
// Fixed desc: it was a copy-paste of ff's "out tokens but one" even though
// this action sends every token (out_msg.Tokens := cache_entry.Tokens).
699 action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
700 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
701 assert(is_valid(cache_entry));
702 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
703 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
704 out_msg.Address := address;
705 out_msg.Type := CoherenceResponseType:DATA_OWNER;
706 out_msg.Sender := machineID;
707 out_msg.Destination.add(persistentTable.findSmallest(address));
708 out_msg.Tokens := cache_entry.Tokens;
709 out_msg.DataBlk := cache_entry.DataBlk;
710 out_msg.Dirty := cache_entry.Dirty;
711 out_msg.MessageSize := MessageSizeType:Response_Data;
713 cache_entry.Tokens := 0;
// The line is persistently locked by another node: redirect an incoming
// response verbatim to the highest-priority starving processor.
718 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
719 // assert(persistentTable.isLocked(address));
720 peek(responseNetwork_in, ResponseMsg) {
721 // FIXME, should use a 3rd vnet in some cases
722 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
723 out_msg.Address := address;
724 out_msg.Type := in_msg.Type;
725 out_msg.Sender := machineID;
726 out_msg.Destination.add(persistentTable.findSmallest(address));
727 out_msg.Tokens := in_msg.Tokens;
728 out_msg.DataBlk := in_msg.DataBlk;
729 out_msg.Dirty := in_msg.Dirty;
730 out_msg.MessageSize := in_msg.MessageSize;
// Bounce an L1 shared-data writeback to the starver, rewriting the WB type
// to the equivalent response type (data for WB_SHARED_DATA, ack otherwise).
735 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
736 //assert(persistentTable.isLocked(address));
737 peek(responseNetwork_in, ResponseMsg) {
738 // FIXME, should use a 3rd vnet in some cases
739 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
740 out_msg.Address := address;
741 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
742 out_msg.Type := CoherenceResponseType:DATA_SHARED;
// Token-only writebacks must not carry the owner majority.
744 assert(in_msg.Tokens < (max_tokens() / 2));
745 out_msg.Type := CoherenceResponseType:ACK;
747 out_msg.Sender := machineID;
748 out_msg.Destination.add(persistentTable.findSmallest(address));
749 out_msg.Tokens := in_msg.Tokens;
750 out_msg.DataBlk := in_msg.DataBlk;
751 out_msg.Dirty := in_msg.Dirty;
752 out_msg.MessageSize := in_msg.MessageSize;
// Bounce an owned writeback to the starver as a DATA_OWNER response,
// transferring ownership directly.
757 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
758 // assert(persistentTable.isLocked(address));
759 peek(responseNetwork_in, ResponseMsg) {
760 // FIXME, should use a 3rd vnet in some cases
761 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
762 out_msg.Address := address;
763 out_msg.Type := CoherenceResponseType:DATA_OWNER;
764 out_msg.Sender := machineID;
765 out_msg.Destination.add(persistentTable.findSmallest(address));
766 out_msg.Tokens := in_msg.Tokens;
767 out_msg.DataBlk := in_msg.DataBlk;
768 out_msg.Dirty := in_msg.Dirty;
769 out_msg.MessageSize := in_msg.MessageSize;
// A writeback (or hint) tells us the sending L1 no longer shares the line:
// drop it from the local sharer directory.
775 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
776 peek(responseNetwork_in, ResponseMsg) {
777 removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
// Forward a transient request to local L1s.  With filtering enabled, a
// first-attempt request for a line with no recorded sharers is suppressed
// entirely (the elided else-branch does the actual forwarding).
781 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
782 peek(requestNetwork_in, RequestMsg) {
783 if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
784 //profile_filter_action(1);
785 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
789 enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
790 out_msg.Address := in_msg.Address;
791 out_msg.Requestor := in_msg.Requestor;
794 // Currently assuming only one chip so all L1s are local
796 //out_msg.Destination := getLocalL1IDs(machineID);
797 out_msg.Destination.broadcast(MachineType:L1Cache);
// Never echo a request back at its own requestor.
798 out_msg.Destination.remove(in_msg.Requestor);
800 out_msg.Type := in_msg.Type;
801 out_msg.isLocal := false;
802 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
803 out_msg.AccessMode := in_msg.AccessMode;
804 out_msg.Prefetch := in_msg.Prefetch;
806 //profile_filter_action(0);
// L1 GETS hit: send clean shared data plus one token to the requesting L1
// (token field on the message is elided in this excerpt).
811 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
812 peek(L1requestNetwork_in, RequestMsg) {
813 assert(is_valid(cache_entry));
814 assert(cache_entry.Tokens > 0);
815 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
816 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
817 out_msg.Address := address;
818 out_msg.Type := CoherenceResponseType:DATA_SHARED;
819 out_msg.Sender := machineID;
820 out_msg.Destination.add(in_msg.Requestor);
821 out_msg.DataBlk := cache_entry.DataBlk;
822 out_msg.Dirty := false;
823 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
826 cache_entry.Tokens := cache_entry.Tokens - 1;
// L1 GETS hit while we hold exactly the owner majority plus one: we must
// pass ownership, so send data and all our tokens as DATA_OWNER.
830 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
831 peek(L1requestNetwork_in, RequestMsg) {
832 assert(is_valid(cache_entry));
833 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
834 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
835 out_msg.Address := address;
836 out_msg.Type := CoherenceResponseType:DATA_OWNER;
837 out_msg.Sender := machineID;
838 out_msg.Destination.add(in_msg.Requestor);
839 out_msg.DataBlk := cache_entry.DataBlk;
840 out_msg.Dirty := cache_entry.Dirty;
841 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
842 out_msg.Tokens := cache_entry.Tokens;
844 cache_entry.Tokens := 0;
// L1 GETX hit: hand the requesting L1 the data and every token we hold.
848 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
849 peek(L1requestNetwork_in, RequestMsg) {
850 assert(is_valid(cache_entry));
851 // assert(cache_entry.Tokens == max_tokens());
852 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
853 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
854 out_msg.Address := address;
855 out_msg.Type := CoherenceResponseType:DATA_OWNER;
856 out_msg.Sender := machineID;
857 out_msg.Destination.add(in_msg.Requestor);
858 out_msg.DataBlk := cache_entry.DataBlk;
859 out_msg.Dirty := cache_entry.Dirty;
860 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
861 //out_msg.Tokens := max_tokens();
862 out_msg.Tokens := cache_entry.Tokens;
864 cache_entry.Tokens := 0;
// Queue-management actions: dequeue the message just handled from each
// incoming network port.
868 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
869 persistentNetwork_in.dequeue();
872 action(m_popRequestQueue, "m", desc="Pop request queue.") {
873 requestNetwork_in.dequeue();
876 action(n_popResponseQueue, "n", desc="Pop response queue") {
877 responseNetwork_in.dequeue();
880 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
881 L1requestNetwork_in.dequeue();
// Accumulate tokens from an incoming response; also latch the dirty bit
// here because Writeback_All_Tokens transitions may skip u_writeDataToCache.
885 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
886 peek(responseNetwork_in, ResponseMsg) {
887 assert(is_valid(cache_entry));
888 assert(in_msg.Tokens != 0);
889 cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
891 // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
892 // may not trigger this action.
893 if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
894 cache_entry.Dirty := true;
// Update the local sharer directory from an L1 request: GETX records a new
// exclusive writer, GETS adds a sharer.  Non-L1 requestors are ignored.
899 action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
900 peek(L1requestNetwork_in, RequestMsg) {
901 if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
902 if (in_msg.Type == CoherenceRequestType:GETX) {
903 setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
904 } else if (in_msg.Type == CoherenceRequestType:GETS) {
905 addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
// Drop the "local L1 holds this exclusively" hint for the current address.
911 action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
912 clearExclusiveBitIfExists(address);
// Touch the replacement state so L1-driven hits keep the line recent.
915 action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
916 peek(L1requestNetwork_in, RequestMsg) {
917 if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
918 (is_valid(cache_entry))) {
919 L2cacheMemory.setMRU(address);
// Reply to an external (global) requestor with a dataless ACK carrying
// every token this bank holds, then zero the local token count.
// No-op when no tokens are held.
924 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
925 assert(is_valid(cache_entry));
926 if (cache_entry.Tokens > 0) {
927 peek(requestNetwork_in, RequestMsg) {
928 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
929 out_msg.Address := address;
930 out_msg.Type := CoherenceResponseType:ACK;
931 out_msg.Sender := machineID;
932 out_msg.Destination.add(in_msg.Requestor);
933 assert(cache_entry.Tokens >= 1);
934 out_msg.Tokens := cache_entry.Tokens;
935 out_msg.MessageSize := MessageSizeType:Response_Control;
// NOTE(review): the listing jumps 935 -> 939; the enqueue/peek closing
// braces are among the elided lines before the token count is cleared.
939 cache_entry.Tokens := 0;
// Same as t_sendAckWithCollectedTokens, but for a LOCAL L1 requestor:
// peeks the L1 request queue instead of the global request queue, sends
// all held tokens in a control ACK, then zeroes the local count.
942 action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
943 assert(is_valid(cache_entry));
944 if (cache_entry.Tokens > 0) {
945 peek(L1requestNetwork_in, RequestMsg) {
946 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
947 out_msg.Address := address;
948 out_msg.Type := CoherenceResponseType:ACK;
949 out_msg.Sender := machineID;
950 out_msg.Destination.add(in_msg.Requestor);
951 assert(cache_entry.Tokens >= 1);
952 out_msg.Tokens := cache_entry.Tokens;
953 out_msg.MessageSize := MessageSizeType:Response_Control;
957 cache_entry.Tokens := 0;
// Copy the data block from the incoming response into the cache entry;
// the dirty bit is sticky — it is set from the message but never cleared.
960 action(u_writeDataToCache, "u", desc="Write data to cache") {
961 peek(responseNetwork_in, ResponseMsg) {
962 assert(is_valid(cache_entry));
963 cache_entry.DataBlk := in_msg.DataBlk;
964 if ((cache_entry.Dirty == false) && in_msg.Dirty) {
965 cache_entry.Dirty := in_msg.Dirty;
// Allocate a fresh L2 entry for this address and bind it as cache_entry.
970 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
971 set_cache_entry(L2cacheMemory.allocate(address, new Entry));
// Free this address's L2 entry so a replacement can proceed in parallel
// with any outstanding fetch.
974 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
975 L2cacheMemory.deallocate(address);
// Record a demand-miss statistic for the incoming L1 request type.
// NOTE(review): the profileGenericRequest call's remaining arguments are
// on elided lines (listing jumps 981 -> 988) — confirm against upstream.
979 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
980 peek(L1requestNetwork_in, RequestMsg) {
981 L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
// Sanity check: for data-bearing responses (anything other than plain
// ACKs and token-only writebacks) the payload must equal the cached copy.
988 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
989 peek(responseNetwork_in, ResponseMsg) {
990 if (in_msg.Type != CoherenceResponseType:ACK &&
991 in_msg.Type != CoherenceResponseType:WB_TOKENS) {
992 assert(is_valid(cache_entry));
993 assert(cache_entry.DataBlk == in_msg.DataBlk);
999 //*****************************************************
// TRANSITIONS — state/event pairs mapping to the actions above.
// NOTE(review): as with the actions, interior lines (including closing
// braces and some action invocations) are elided from this listing.
1001 //*****************************************************
// L1 hint/invalidate: update the local sharing filter in any state.
1003 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
1005 h_updateFilterFromL1HintOrWB;
// A persistent-table lock/unlock for our own request needs no action in
// unlocked states beyond consuming the persistent-queue message.
1009 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
1010 l_popPersistentQueue;
1014 // Transitions from NP
// NP (not present): no entry allocated.  Transient requests are merely
// forwarded to any local sharers; local L1 requests are broadcast out;
// writebacks from L1s allocate a fresh entry and land in S/I/M/O
// depending on whether data and/or ownership and how many tokens arrive.
1016 transition(NP, {Transient_GETX, Transient_GETS}) {
1017 // forward message to local sharers
1019 j_forwardTransientRequestToLocalSharers;
1024 transition(NP, {L1_GETS, L1_GETX}) {
1025 a_broadcastLocalRequest;
1028 o_popL1RequestQueue;
// Stale responses arriving while not present: elided body (1031 -> 1036);
// presumably bounced/dropped — confirm against the full source.
1031 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
1036 transition(NP, Writeback_Shared_Data, S) {
1037 vv_allocateL2CacheBlock;
1039 q_updateTokensFromResponse;
1040 h_updateFilterFromL1HintOrWB;
1044 transition(NP, Writeback_Tokens, I) {
1045 vv_allocateL2CacheBlock;
1046 q_updateTokensFromResponse;
1047 h_updateFilterFromL1HintOrWB;
1051 transition(NP, Writeback_All_Tokens, M) {
1052 vv_allocateL2CacheBlock;
1054 q_updateTokensFromResponse;
1055 h_updateFilterFromL1HintOrWB;
1059 transition(NP, Writeback_Owned, O) {
1060 vv_allocateL2CacheBlock;
1062 q_updateTokensFromResponse;
1063 h_updateFilterFromL1HintOrWB;
// Persistent requests in NP: transition header is split/elided here.
1069 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1071 l_popPersistentQueue;
1074 // Transitions from Idle
// I (idle): entry allocated, possibly holding tokens but no valid data.
// Local requests are broadcast and any collected tokens are handed to
// the local requestor; incoming data/tokens upgrade the state
// (S for shared data, O for owner data, M for all tokens).
1076 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1077 a_broadcastLocalRequest;
1078 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1081 o_popL1RequestQueue;
1084 transition(I, L1_GETX) {
1085 a_broadcastLocalRequest;
1086 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1089 o_popL1RequestQueue;
1092 transition(I, L2_Replacement) {
1093 c_cleanReplacement; // Only needed in some cases
1094 rr_deallocateL2CacheBlock;
1097 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1099 t_sendAckWithCollectedTokens;
1100 j_forwardTransientRequestToLocalSharers;
// Persistent requests in I: header split/elided (note 1100 -> 1105 jump).
1105 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1107 e_sendAckWithCollectedTokens;
1108 l_popPersistentQueue;
1112 transition(I, Ack) {
1113 q_updateTokensFromResponse;
1117 transition(I, Data_Shared, S) {
1119 q_updateTokensFromResponse;
1123 transition(I, Writeback_Shared_Data, S) {
1125 q_updateTokensFromResponse;
1126 h_updateFilterFromL1HintOrWB;
1130 transition(I, Writeback_Tokens) {
1131 q_updateTokensFromResponse;
1132 h_updateFilterFromL1HintOrWB;
1136 transition(I, Data_Owner, O) {
1138 q_updateTokensFromResponse;
1142 transition(I, Writeback_Owned, O) {
1144 q_updateTokensFromResponse;
1145 h_updateFilterFromL1HintOrWB;
1149 transition(I, Data_All_Tokens, M) {
1151 q_updateTokensFromResponse;
1156 transition(I, Writeback_All_Tokens, M) {
1158 q_updateTokensFromResponse;
1159 h_updateFilterFromL1HintOrWB;
1163 // Transitions from Shared
// S (shared): valid data, some tokens, not owner.  Persistent GETX locks
// to I_L after giving up tokens; persistent GETS keeps one token and
// locks to S_L.  Incoming owner data/writebacks upgrade to O or M.
1165 transition(S, L2_Replacement, I) {
1167 rr_deallocateL2CacheBlock;
1170 transition(S, Transient_GETX, I) {
1172 t_sendAckWithCollectedTokens;
1173 j_forwardTransientRequestToLocalSharers;
1177 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1178 j_forwardTransientRequestToLocalSharers;
1183 transition(S, Persistent_GETX, I_L) {
1184 e_sendAckWithCollectedTokens;
1185 l_popPersistentQueue;
1189 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1190 f_sendAckWithAllButOneTokens;
1191 l_popPersistentQueue;
1195 transition(S, Ack) {
1196 q_updateTokensFromResponse;
1200 transition(S, Data_Shared) {
1201 w_assertIncomingDataAndCacheDataMatch;
1202 q_updateTokensFromResponse;
1206 transition(S, Writeback_Tokens) {
1207 q_updateTokensFromResponse;
1208 h_updateFilterFromL1HintOrWB;
1212 transition(S, Writeback_Shared_Data) {
1213 w_assertIncomingDataAndCacheDataMatch;
1214 q_updateTokensFromResponse;
1215 h_updateFilterFromL1HintOrWB;
1220 transition(S, Data_Owner, O) {
1221 w_assertIncomingDataAndCacheDataMatch;
1222 q_updateTokensFromResponse;
1226 transition(S, Writeback_Owned, O) {
1227 w_assertIncomingDataAndCacheDataMatch;
1228 q_updateTokensFromResponse;
1229 h_updateFilterFromL1HintOrWB;
1233 transition(S, Data_All_Tokens, M) {
1234 w_assertIncomingDataAndCacheDataMatch;
1235 q_updateTokensFromResponse;
1239 transition(S, Writeback_All_Tokens, M) {
1240 w_assertIncomingDataAndCacheDataMatch;
1241 q_updateTokensFromResponse;
1242 h_updateFilterFromL1HintOrWB;
// Local GETX from S gives all collected tokens to the L1 and drops to I;
// local GETS is satisfied from this bank's copy (last token drops to I).
1246 transition(S, L1_GETX, I) {
1247 a_broadcastLocalRequest;
1248 tt_sendLocalAckWithCollectedTokens;
1252 o_popL1RequestQueue;
1256 transition(S, L1_GETS) {
1257 k_dataFromL2CacheToL1Requestor;
1260 o_popL1RequestQueue;
1263 transition(S, L1_GETS_Last_Token, I) {
1265 k_dataFromL2CacheToL1Requestor;
1268 o_popL1RequestQueue;
1271 // Transitions from Owned
// O (owned): valid, possibly dirty data plus the owner token.  External
// GETX takes data and all tokens (-> I); persistent GETS keeps one
// token (-> S_L) unless it is the last token (-> I_L).
1273 transition(O, L2_Replacement, I) {
1274 cc_dirtyReplacement;
1275 rr_deallocateL2CacheBlock;
1278 transition(O, Transient_GETX, I) {
1280 dd_sendDataWithAllTokens;
1281 j_forwardTransientRequestToLocalSharers;
1285 transition(O, Persistent_GETX, I_L) {
1286 ee_sendDataWithAllTokens;
1287 l_popPersistentQueue;
1290 transition(O, Persistent_GETS, S_L) {
1291 ff_sendDataWithAllButOneTokens;
1292 l_popPersistentQueue;
1295 transition(O, Persistent_GETS_Last_Token, I_L) {
1296 fa_sendDataWithAllTokens;
1297 l_popPersistentQueue;
1300 transition(O, Transient_GETS) {
1301 // send multiple tokens
1303 d_sendDataWithTokens;
// Transient GETS when only the owner token remains: deliberately ignored
// so the requestor escalates to a persistent request.
1307 transition(O, Transient_GETS_Last_Token) {
1308 // WAIT FOR IT TO GO PERSISTENT
1313 transition(O, Ack) {
1314 q_updateTokensFromResponse;
1318 transition(O, Ack_All_Tokens, M) {
1319 q_updateTokensFromResponse;
1323 transition(O, Data_Shared) {
1324 w_assertIncomingDataAndCacheDataMatch;
1325 q_updateTokensFromResponse;
1330 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1331 w_assertIncomingDataAndCacheDataMatch;
1332 q_updateTokensFromResponse;
1333 h_updateFilterFromL1HintOrWB;
1337 transition(O, Data_All_Tokens, M) {
1338 w_assertIncomingDataAndCacheDataMatch;
1339 q_updateTokensFromResponse;
1343 transition(O, Writeback_All_Tokens, M) {
1344 w_assertIncomingDataAndCacheDataMatch;
1345 q_updateTokensFromResponse;
1346 h_updateFilterFromL1HintOrWB;
1350 transition(O, L1_GETS) {
1351 k_dataFromL2CacheToL1Requestor;
1354 o_popL1RequestQueue;
1357 transition(O, L1_GETS_Last_Token, I) {
1358 k_dataOwnerFromL2CacheToL1Requestor;
1361 o_popL1RequestQueue;
1364 transition(O, L1_GETX, I) {
1365 a_broadcastLocalRequest;
1366 k_dataAndAllTokensFromL2CacheToL1Requestor;
1370 o_popL1RequestQueue;
1373 // Transitions from M
// M (modified): all tokens held.  Any external request takes data plus
// all tokens (-> I or, for persistent requests, the locked I_L); a local
// GETS demotes this bank to O, a local GETX hands everything to the L1.
1375 transition(M, L2_Replacement, I) {
1376 cc_dirtyReplacement;
1377 rr_deallocateL2CacheBlock;
1380 // MRM_DEBUG: Give up all tokens even for GETS? ???
1381 transition(M, {Transient_GETX, Transient_GETS}, I) {
1383 dd_sendDataWithAllTokens;
1387 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1388 ee_sendDataWithAllTokens;
1389 l_popPersistentQueue;
1393 transition(M, L1_GETS, O) {
1394 k_dataFromL2CacheToL1Requestor;
1397 o_popL1RequestQueue;
1400 transition(M, L1_GETX, I) {
1401 k_dataAndAllTokensFromL2CacheToL1Requestor;
1404 o_popL1RequestQueue;
1408 //Transitions from locked states
// I_L / S_L: a persistent request has locked this address, so incoming
// responses and writebacks are bounced onward to the starving node
// rather than kept; the lock clears on Own_Lock_or_Unlock.
1410 transition({I_L, S_L}, Ack) {
1411 gg_bounceResponseToStarver;
1415 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1416 gg_bounceResponseToStarver;
1420 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1421 gg_bounceWBSharedToStarver;
1422 h_updateFilterFromL1HintOrWB;
1426 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1427 gg_bounceWBOwnedToStarver;
1428 h_updateFilterFromL1HintOrWB;
1432 transition(S_L, L2_Replacement, I) {
1434 rr_deallocateL2CacheBlock;
1437 transition(I_L, L2_Replacement, I) {
1438 rr_deallocateL2CacheBlock;
1441 transition(I_L, Own_Lock_or_Unlock, I) {
1442 l_popPersistentQueue;
1445 transition(S_L, Own_Lock_or_Unlock, S) {
1446 l_popPersistentQueue;
// Transient requests while locked: body elided in this listing
// (1449 -> 1454); presumably just consumed — confirm against upstream.
1449 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
1454 transition(I_L, {L1_GETX, L1_GETS}) {
1455 a_broadcastLocalRequest;
1458 o_popL1RequestQueue;
1461 transition(S_L, L1_GETX, I_L) {
1462 a_broadcastLocalRequest;
1463 tt_sendLocalAckWithCollectedTokens;
1467 o_popL1RequestQueue;
1470 transition(S_L, L1_GETS) {
1471 k_dataFromL2CacheToL1Requestor;
1474 o_popL1RequestQueue;
1477 transition(S_L, L1_GETS_Last_Token, I_L) {
1478 k_dataFromL2CacheToL1Requestor;
1481 o_popL1RequestQueue;
1484 transition(S_L, Persistent_GETX, I_L) {
1485 e_sendAckWithCollectedTokens;
1486 l_popPersistentQueue;
1489 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1490 l_popPersistentQueue;
1493 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1494 l_popPersistentQueue;