2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gem5 Ruby SLICC controller declaration for one bank of a shared L2 cache
// in a token-coherence ("Token protocol") system.  Declares the controller's
// configuration parameters and its to/from-network message buffers.
// NOTE(review): this listing has fused line numbers and gaps; several
// continuation lines (e.g. vnet_type= attributes for most buffers) are
// missing from this view — confirm against the original file.
29 machine(MachineType:L2Cache, "Token protocol")
30 : CacheMemory * L2cache;
// Latencies (in cycles) charged when enqueuing outgoing requests/responses.
32 Cycles l2_request_latency := 5;
33 Cycles l2_response_latency := 5;
// When true, external transient requests are filtered (not forwarded to
// local L1s) if the local sharer directory shows no sharers.
34 bool filtering_enabled := "True";
37 // From local bank of L2 cache TO the network
39 // this L2 bank -> a local L1 || mod-directory
40 MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
42 // this L2 bank -> mod-directory
43 MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
45 // this L2 bank -> a local L1
46 MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
50 // FROM the network to this local bank of L2 cache
52 // a local L1 || mod-directory -> this L2 bank
53 MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
// Persistent (starvation-avoidance) requests arrive on their own vnet.
55 MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
56 vnet_type="persistent";
57 // mod-directory -> this L2 bank
58 MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
60 // a local L1 -> this L2 bank
61 MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
// Cache-line states for this L2 bank.  Token counting replaces classic
// invariants: S/O/M are distinguished by how many tokens the line holds
// (see the assertions in setState).  The *_L states are "locked" variants
// entered while another node holds a persistent request on the address.
66 state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
68 NP, AccessPermission:Invalid, desc="Not Present";
69 I, AccessPermission:Invalid, desc="Idle";
70 S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
71 O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
72 M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
75 I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
76 S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
// Events that drive the L2 state machine.  Grouped by origin: local L1
// requests, external ("transient") requests, self-initiated replacements,
// L1 writebacks, data/ack responses, and persistent-table activity.
80 enumeration(Event, desc="Cache events") {
83 L1_GETS, desc="local L1 GETS request";
// "Last_Token" variants fire when satisfying the request would give away
// our final token (or final owner-half+1) — see the in_port trigger logic.
84 L1_GETS_Last_Token, desc="local L1 GETS request";
85 L1_GETX, desc="local L1 GETX request";
86 L1_INV, desc="L1 no longer has tokens";
87 Transient_GETX, desc="A GetX from another processor";
88 Transient_GETS, desc="A GetS from another processor";
89 Transient_GETS_Last_Token, desc="A GetS from another processor";
91 // events initiated by this L2
92 L2_Replacement, desc="L2 Replacement", format="!r";
94 // events of external L2 responses
97 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
98 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
99 Writeback_All_Tokens, desc="Received a writeback from L1";
100 Writeback_Owned, desc="Received a writeback from L1";
103 Data_Shared, desc="Received a data message, we are now a sharer";
104 Data_Owner, desc="Received a data message, we are now the owner";
105 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
106 Ack, desc="Received an ack message";
107 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
// Persistent (starvation) requests from the persistent network.
110 Persistent_GETX, desc="Another processor has priority to read/write";
111 Persistent_GETS, desc="Another processor has priority to read";
112 Persistent_GETS_Last_Token, desc="Another processor has priority to read";
113 Own_Lock_or_Unlock, desc="This processor now has priority";
// Per-line L2 cache entry: protocol state, dirty bit, the token count for
// token coherence, and the data block itself.
119 structure(Entry, desc="...", interface="AbstractCacheEntry") {
120 State CacheState, desc="cache state";
121 bool Dirty, desc="Is the data dirty (different than memory)?";
122 int Tokens, desc="The number of tokens we're holding for the line";
123 DataBlk DataBlk, desc="data for the block";
// Entry of the local sharer-tracking directory (a request filter over the
// local L1s): which L1s likely share the line, and whether one of them
// likely holds it exclusively.
126 structure(DirEntry, desc="...", interface="AbstractEntry") {
127 Set Sharers, desc="Set of the internal processors that want the block in shared state";
128 bool exclusive, default="false", desc="if local exclusive is likely";
// Interface to the C++-implemented perfect (no-conflict) cache used as the
// local L1-sharer directory backing store.
131 structure(PerfectCacheMemory, external="yes") {
133 void deallocate(Addr);
134 DirEntry lookup(Addr);
135 bool isTagPresent(Addr);
// Interface to the C++-implemented persistent-request table that arbitrates
// starvation-avoidance (persistent) requests; findSmallest returns the
// highest-priority (lowest-ID) starving requestor for an address.
138 structure(PersistentTable, external="yes") {
139 void persistentRequestLock(Addr, MachineID, AccessType);
140 void persistentRequestUnlock(Addr, MachineID);
141 MachineID findSmallest(Addr);
142 AccessType typeOfSmallest(Addr);
143 void markEntries(Addr);
145 int countStarvingForAddress(Addr);
146 int countReadStarvingForAddress(Addr);
// Controller-private state and generated-interface declarations.
149 PersistentTable persistentTable;
150 PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
153 void set_cache_entry(AbstractCacheEntry b);
154 void unset_cache_entry();
155 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
// Look up the L2 entry for an address (may be invalid if not present).
// NOTE(review): the return statement/closing brace are outside this view.
157 Entry getCacheEntry(Addr address), return_by_pointer="yes" {
158 Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
// Look up the local sharer-directory entry; caller must ensure presence.
162 DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
163 return localDirectory.lookup(address);
// Functional (debug/checkpoint) read of the cached block into the packet.
166 void functionalRead(Addr addr, Packet *pkt) {
167 testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
// Functional write into the cached block; returns number of writes applied.
170 int functionalWrite(Addr addr, Packet *pkt) {
171 int num_functional_writes := 0;
172 num_functional_writes := num_functional_writes +
173 testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
174 return num_functional_writes;
// Token count held for the line; an invalid entry holds zero tokens
// (the else branch is outside this view).
177 int getTokens(Entry cache_entry) {
178 if (is_valid(cache_entry)) {
179 return cache_entry.Tokens;
// Current state for an address; falls back to the persistent table when no
// entry exists (locked addresses report a locked state — the branch bodies
// for the no-entry cases are outside this view).
185 State getState(Entry cache_entry, Addr addr) {
186 if (is_valid(cache_entry)) {
187 return cache_entry.CacheState;
188 } else if (persistentTable.isLocked(addr)) {
// Set the state, first asserting the token-count invariants that define
// each state class in token coherence.
195 void setState(Entry cache_entry, Addr addr, State state) {
197 if (is_valid(cache_entry)) {
198 // Make sure the token count is in range
199 assert(cache_entry.Tokens >= 0);
200 assert(cache_entry.Tokens <= max_tokens());
// Exactly half the tokens is never a legal count (the owner token is
// encoded as holding more than half).
201 assert(cache_entry.Tokens != (max_tokens() / 2));
203 // Make sure we have no tokens in L
204 if ((state == State:I_L) ) {
205 assert(cache_entry.Tokens == 0);
208 // in M and E you have all the tokens
209 if (state == State:M ) {
210 assert(cache_entry.Tokens == max_tokens());
213 // in NP you have no tokens
214 if (state == State:NP) {
215 assert(cache_entry.Tokens == 0);
218 // You have at least one token in S-like states
219 if (state == State:S ) {
220 assert(cache_entry.Tokens > 0);
223 // You have at least half the token in O-like states
224 if (state == State:O ) {
225 assert(cache_entry.Tokens > (max_tokens() / 2));
228 cache_entry.CacheState := state;
// Map an address's current state to a Ruby access permission;
// NotPresent when there is no L2 entry.
232 AccessPermission getAccessPermission(Addr addr) {
233 Entry cache_entry := getCacheEntry(addr);
234 if(is_valid(cache_entry)) {
235 return L2Cache_State_to_permission(cache_entry.CacheState);
238 return AccessPermission:NotPresent;
// Push the permission implied by a (pending) state onto the cache entry.
241 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
242 if (is_valid(cache_entry)) {
243 cache_entry.changePermission(L2Cache_State_to_permission(state));
// Drop one L1 from the local sharer set; free the directory entry once the
// last sharer is gone.
247 void removeSharer(Addr addr, NodeID id) {
249 if (localDirectory.isTagPresent(addr)) {
250 DirEntry dir_entry := getDirEntry(addr);
251 dir_entry.Sharers.remove(id);
252 if (dir_entry.Sharers.count() == 0) {
253 localDirectory.deallocate(addr);
// True if any local L1 is recorded as a sharer of the line
// (the false-return paths are outside this view).
258 bool sharersExist(Addr addr) {
259 if (localDirectory.isTagPresent(addr)) {
260 DirEntry dir_entry := getDirEntry(addr);
261 if (dir_entry.Sharers.count() > 0) {
// True if a local L1 likely holds the line exclusively.
273 bool exclusiveExists(Addr addr) {
274 if (localDirectory.isTagPresent(addr)) {
275 DirEntry dir_entry := getDirEntry(addr);
276 if (dir_entry.exclusive) {
288 // assumes that caller will check to make sure tag is present
289 Set getSharers(Addr addr) {
290 DirEntry dir_entry := getDirEntry(addr);
291 return dir_entry.Sharers;
// Record an L1 GETX requestor as the sole (exclusive) local sharer,
// allocating the directory entry on demand.
294 void setNewWriter(Addr addr, NodeID id) {
295 if (localDirectory.isTagPresent(addr) == false) {
296 localDirectory.allocate(addr);
298 DirEntry dir_entry := getDirEntry(addr);
299 dir_entry.Sharers.clear();
300 dir_entry.Sharers.add(id);
301 dir_entry.exclusive := true;
// Record an L1 GETS requestor as an additional local sharer.
304 void addNewSharer(Addr addr, NodeID id) {
305 if (localDirectory.isTagPresent(addr) == false) {
306 localDirectory.allocate(addr);
308 DirEntry dir_entry := getDirEntry(addr);
309 dir_entry.Sharers.add(id);
310 // dir_entry.exclusive := false;
// Clear the exclusive hint, if the directory tracks this address at all.
313 void clearExclusiveBitIfExists(Addr addr) {
314 if (localDirectory.isTagPresent(addr)) {
315 DirEntry dir_entry := getDirEntry(addr);
316 dir_entry.exclusive := false;
// Outgoing ports: global requests (to directory), local requests (to L1s),
// and responses.
321 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
322 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
323 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
329 // Persistent Network
// Incoming persistent (starvation-avoidance) requests: first update the
// persistent table, then trigger an event based on who now has priority
// and whether satisfying a reader would surrender our last/owner token.
330 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
331 if (persistentNetwork_in.isReady(clockEdge())) {
332 peek(persistentNetwork_in, PersistentMsg) {
333 assert(in_msg.Destination.isElement(machineID));
335 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
336 persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
337 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
338 persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
339 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
340 persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
342 error("Unexpected message");
345 Entry cache_entry := getCacheEntry(in_msg.addr);
346 // React to the message based on the current state of the table
347 if (persistentTable.isLocked(in_msg.addr)) {
349 if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
// Last token, or owner token plus one: the reader must get data too.
350 if (getTokens(cache_entry) == 1 ||
351 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
352 trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
355 trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
358 trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
362 trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
// Incoming transient (non-persistent) requests from the global network.
370 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
371 if (requestNetwork_in.isReady(clockEdge())) {
372 peek(requestNetwork_in, RequestMsg) {
373 assert(in_msg.Destination.isElement(machineID));
375 Entry cache_entry := getCacheEntry(in_msg.addr);
376 if (in_msg.Type == CoherenceRequestType:GETX) {
377 trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
378 } else if (in_msg.Type == CoherenceRequestType:GETS) {
379 if (getTokens(cache_entry) == 1) {
380 trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
384 trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
387 error("Unexpected message");
// Incoming requests from the local L1 caches; same shape as above but the
// "last token" check also covers the owner-token case (half + 1).
393 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
394 if (L1requestNetwork_in.isReady(clockEdge())) {
395 peek(L1requestNetwork_in, RequestMsg) {
396 assert(in_msg.Destination.isElement(machineID));
397 Entry cache_entry := getCacheEntry(in_msg.addr);
398 if (in_msg.Type == CoherenceRequestType:GETX) {
399 trigger(Event:L1_GETX, in_msg.addr, cache_entry);
400 } else if (in_msg.Type == CoherenceRequestType:GETS) {
401 if (getTokens(cache_entry) == 1 ||
402 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
403 trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
406 trigger(Event:L1_GETS, in_msg.addr, cache_entry);
409 error("Unexpected message");
// Incoming responses (data, acks, and L1 writebacks).  Split on whether
// this message would bring the line to ALL tokens: the first branch handles
// partial-token responses, the second (else, starting at the "All_Tokens"
// triggers below) handles responses that complete the token set.
// Writebacks only land here if the bank has room (or the line is present);
// otherwise an L2_Replacement is triggered on the victim chosen by
// cacheProbe.
417 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
418 if (responseNetwork_in.isReady(clockEdge())) {
419 peek(responseNetwork_in, ResponseMsg) {
420 assert(in_msg.Destination.isElement(machineID));
421 Entry cache_entry := getCacheEntry(in_msg.addr);
423 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
424 if (in_msg.Type == CoherenceResponseType:ACK) {
425 assert(in_msg.Tokens < (max_tokens() / 2));
426 trigger(Event:Ack, in_msg.addr, cache_entry);
427 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
428 trigger(Event:Data_Owner, in_msg.addr, cache_entry);
429 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
430 trigger(Event:Data_Shared, in_msg.addr, cache_entry);
431 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
432 in_msg.Type == CoherenceResponseType:WB_OWNED ||
433 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
435 if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
437 // either room is available or the block is already present
439 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
440 assert(in_msg.Dirty == false);
441 trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
442 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
443 assert(in_msg.Dirty == false);
444 trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
446 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
447 //assert(in_msg.Dirty == false);
448 trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
// No room for the writeback: replace a victim line first.
452 trigger(Event:L2_Replacement,
453 L2cache.cacheProbe(in_msg.addr),
454 getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
456 } else if (in_msg.Type == CoherenceResponseType:INV) {
457 trigger(Event:L1_INV, in_msg.addr, cache_entry);
459 error("Unexpected message");
// From here: the message completes the token set for the line.
462 if (in_msg.Type == CoherenceResponseType:ACK) {
463 assert(in_msg.Tokens < (max_tokens() / 2));
464 trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
465 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
466 in_msg.Type == CoherenceResponseType:DATA_SHARED) {
467 trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
468 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
469 in_msg.Type == CoherenceResponseType:WB_OWNED ||
470 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
471 if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
473 // either room is available or the block is already present
475 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
476 assert(in_msg.Dirty == false);
477 assert( (getState(cache_entry, in_msg.addr) != State:NP)
478 && (getState(cache_entry, in_msg.addr) != State:I) );
479 trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
480 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
481 assert(in_msg.Dirty == false);
482 trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
484 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
485 trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
489 trigger(Event:L2_Replacement,
490 L2cache.cacheProbe(in_msg.addr),
491 getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
493 } else if (in_msg.Type == CoherenceResponseType:INV) {
494 trigger(Event:L1_INV, in_msg.addr, cache_entry);
496 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
497 error("Unexpected message");
// Re-issue a local L1 request onto the global network, addressed to the
// directory (single-chip configuration; the old broadcast code is kept
// commented out below).
507 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
509 peek(L1requestNetwork_in, RequestMsg) {
511 // if this is a retry or no local sharers, broadcast normally
512 enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
513 out_msg.addr := in_msg.addr;
514 out_msg.Type := in_msg.Type;
515 out_msg.Requestor := in_msg.Requestor;
516 out_msg.RetryNum := in_msg.RetryNum;
519 // If a statically shared L2 cache, then no other L2 caches can
522 //out_msg.Destination.broadcast(MachineType:L2Cache);
523 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
524 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
526 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
527 out_msg.MessageSize := MessageSizeType:Request_Control;
528 out_msg.AccessMode := in_msg.AccessMode;
529 out_msg.Prefetch := in_msg.Prefetch;
533 //profile_filter_action(0);
// Forward an incoming response (tokens + data) unchanged to the directory
// (memory) instead of keeping it here.
538 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
539 peek(responseNetwork_in, ResponseMsg) {
540 // FIXME, should use a 3rd vnet
541 enqueue(responseNetwork_out, ResponseMsg, 1) {
542 out_msg.addr := address;
543 out_msg.Type := in_msg.Type;
544 out_msg.Sender := machineID;
545 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
546 out_msg.Tokens := in_msg.Tokens;
547 out_msg.MessageSize := in_msg.MessageSize;
548 out_msg.DataBlk := in_msg.DataBlk;
549 out_msg.Dirty := in_msg.Dirty;
// On clean replacement: return any held tokens to the directory as an ACK
// (no data needed), then zero the local token count.
554 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
555 assert(is_valid(cache_entry));
556 if (cache_entry.Tokens > 0) {
557 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
558 out_msg.addr := address;
559 out_msg.Type := CoherenceResponseType:ACK;
560 out_msg.Sender := machineID;
561 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
562 out_msg.Tokens := cache_entry.Tokens;
563 out_msg.MessageSize := MessageSizeType:Writeback_Control;
565 cache_entry.Tokens := 0;
// On owner replacement: send tokens plus data (if dirty) or an owner ack
// (if clean) back to the directory.
569 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
570 assert(is_valid(cache_entry));
571 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
572 out_msg.addr := address;
573 out_msg.Sender := machineID;
574 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
575 out_msg.Tokens := cache_entry.Tokens;
576 out_msg.DataBlk := cache_entry.DataBlk;
577 out_msg.Dirty := cache_entry.Dirty;
579 if (cache_entry.Dirty) {
580 out_msg.MessageSize := MessageSizeType:Writeback_Data;
581 out_msg.Type := CoherenceResponseType:DATA_OWNER;
583 out_msg.MessageSize := MessageSizeType:Writeback_Control;
584 out_msg.Type := CoherenceResponseType:ACK_OWNER;
587 cache_entry.Tokens := 0;
// Satisfy a transient GETS: if we hold plenty of tokens beyond the owner
// half, hand over N_tokens with the data; otherwise give a single token
// (the single-token out_msg.Tokens assignment is outside this view).
590 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
591 peek(requestNetwork_in, RequestMsg) {
592 assert(is_valid(cache_entry));
593 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
594 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
595 out_msg.addr := address;
596 out_msg.Type := CoherenceResponseType:DATA_SHARED;
597 out_msg.Sender := machineID;
598 out_msg.Destination.add(in_msg.Requestor);
599 out_msg.Tokens := N_tokens;
600 out_msg.DataBlk := cache_entry.DataBlk;
601 out_msg.Dirty := false;
602 out_msg.MessageSize := MessageSizeType:Response_Data;
604 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
607 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
608 out_msg.addr := address;
609 out_msg.Type := CoherenceResponseType:DATA_SHARED;
610 out_msg.Sender := machineID;
611 out_msg.Destination.add(in_msg.Requestor);
613 out_msg.DataBlk := cache_entry.DataBlk;
614 out_msg.Dirty := false;
615 out_msg.MessageSize := MessageSizeType:Response_Data;
617 cache_entry.Tokens := cache_entry.Tokens - 1;
// Satisfy a transient GETX: transfer ownership, the data, and every token
// we hold to the requestor.
622 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
623 assert(is_valid(cache_entry));
624 peek(requestNetwork_in, RequestMsg) {
625 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
626 out_msg.addr := address;
627 out_msg.Type := CoherenceResponseType:DATA_OWNER;
628 out_msg.Sender := machineID;
629 out_msg.Destination.add(in_msg.Requestor);
630 assert(cache_entry.Tokens >= 1);
631 out_msg.Tokens := cache_entry.Tokens;
632 out_msg.DataBlk := cache_entry.DataBlk;
633 out_msg.Dirty := cache_entry.Dirty;
634 out_msg.MessageSize := MessageSizeType:Response_Data;
637 cache_entry.Tokens := 0;
// Persistent-GETX handling when we are not the owner: send the starving
// processor all our collected tokens as a data-less ACK.
640 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
641 assert(is_valid(cache_entry));
642 if (cache_entry.Tokens > 0) {
643 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
644 out_msg.addr := address;
645 out_msg.Type := CoherenceResponseType:ACK;
646 out_msg.Sender := machineID;
647 out_msg.Destination.add(persistentTable.findSmallest(address));
648 assert(cache_entry.Tokens >= 1);
649 out_msg.Tokens := cache_entry.Tokens;
650 out_msg.MessageSize := MessageSizeType:Response_Control;
653 cache_entry.Tokens := 0;
// Persistent-GETX handling when we are the owner: surrender data,
// ownership, and every token to the starving processor.
656 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
657 assert(is_valid(cache_entry));
658 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
659 out_msg.addr := address;
660 out_msg.Type := CoherenceResponseType:DATA_OWNER;
661 out_msg.Sender := machineID;
662 out_msg.Destination.add(persistentTable.findSmallest(address));
663 assert(cache_entry.Tokens >= 1);
664 out_msg.Tokens := cache_entry.Tokens;
665 out_msg.DataBlk := cache_entry.DataBlk;
666 out_msg.Dirty := cache_entry.Dirty;
667 out_msg.MessageSize := MessageSizeType:Response_Data;
669 cache_entry.Tokens := 0;
// Persistent-GETS handling: give the starving reader all tokens but one,
// keeping a single token so we may remain a sharer.
672 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
673 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
674 assert(is_valid(cache_entry));
675 assert(cache_entry.Tokens > 0);
676 if (cache_entry.Tokens > 1) {
677 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
678 out_msg.addr := address;
679 out_msg.Type := CoherenceResponseType:ACK;
680 out_msg.Sender := machineID;
681 out_msg.Destination.add(persistentTable.findSmallest(address));
682 assert(cache_entry.Tokens >= 1);
683 out_msg.Tokens := cache_entry.Tokens - 1;
684 out_msg.MessageSize := MessageSizeType:Response_Control;
687 cache_entry.Tokens := 1;
// Owner variant of the above: we hold strictly more than the owner half,
// so we can pass ownership + data while still retaining one token.
690 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
691 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
692 assert(is_valid(cache_entry));
693 assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
694 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
695 out_msg.addr := address;
696 out_msg.Type := CoherenceResponseType:DATA_OWNER;
697 out_msg.Sender := machineID;
698 out_msg.Destination.add(persistentTable.findSmallest(address));
699 out_msg.Tokens := cache_entry.Tokens - 1;
700 out_msg.DataBlk := cache_entry.DataBlk;
701 out_msg.Dirty := cache_entry.Dirty;
702 out_msg.MessageSize := MessageSizeType:Response_Data;
704 cache_entry.Tokens := 1;
// Owner with exactly (half + 1) tokens: keeping one back would drop us to
// the illegal half count, so send everything.
707 action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") {
708 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
709 assert(is_valid(cache_entry));
710 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
711 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
712 out_msg.addr := address;
713 out_msg.Type := CoherenceResponseType:DATA_OWNER;
714 out_msg.Sender := machineID;
715 out_msg.Destination.add(persistentTable.findSmallest(address));
716 out_msg.Tokens := cache_entry.Tokens;
717 out_msg.DataBlk := cache_entry.DataBlk;
718 out_msg.Dirty := cache_entry.Dirty;
719 out_msg.MessageSize := MessageSizeType:Response_Data;
721 cache_entry.Tokens := 0;
// While the address is persistently locked, redirect an incoming response
// unchanged to the current highest-priority starving processor.
726 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
727 // assert(persistentTable.isLocked(address));
728 peek(responseNetwork_in, ResponseMsg) {
729 // FIXME, should use a 3rd vnet in some cases
730 enqueue(responseNetwork_out, ResponseMsg, 1) {
731 out_msg.addr := address;
732 out_msg.Type := in_msg.Type;
733 out_msg.Sender := machineID;
734 out_msg.Destination.add(persistentTable.findSmallest(address));
735 out_msg.Tokens := in_msg.Tokens;
736 out_msg.DataBlk := in_msg.DataBlk;
737 out_msg.Dirty := in_msg.Dirty;
738 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an L1 writeback to the starver, translating the writeback type:
// shared data becomes DATA_SHARED, a token-only writeback becomes an ACK.
743 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
744 //assert(persistentTable.isLocked(address));
745 peek(responseNetwork_in, ResponseMsg) {
746 // FIXME, should use a 3rd vnet in some cases
747 enqueue(responseNetwork_out, ResponseMsg, 1) {
748 out_msg.addr := address;
749 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
750 out_msg.Type := CoherenceResponseType:DATA_SHARED;
752 assert(in_msg.Tokens < (max_tokens() / 2));
753 out_msg.Type := CoherenceResponseType:ACK;
755 out_msg.Sender := machineID;
756 out_msg.Destination.add(persistentTable.findSmallest(address));
757 out_msg.Tokens := in_msg.Tokens;
758 out_msg.DataBlk := in_msg.DataBlk;
759 out_msg.Dirty := in_msg.Dirty;
760 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an owned writeback to the starver as a DATA_OWNER transfer.
765 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
766 // assert(persistentTable.isLocked(address));
767 peek(responseNetwork_in, ResponseMsg) {
768 // FIXME, should use a 3rd vnet in some cases
769 enqueue(responseNetwork_out, ResponseMsg, 1) {
770 out_msg.addr := address;
771 out_msg.Type := CoherenceResponseType:DATA_OWNER;
772 out_msg.Sender := machineID;
773 out_msg.Destination.add(persistentTable.findSmallest(address));
774 out_msg.Tokens := in_msg.Tokens;
775 out_msg.DataBlk := in_msg.DataBlk;
776 out_msg.Dirty := in_msg.Dirty;
777 out_msg.MessageSize := in_msg.MessageSize;
// An L1 writeback implies the sender no longer shares the line — drop it
// from the local sharer filter.
783 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
784 peek(responseNetwork_in, ResponseMsg) {
785 removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
// Forward an external transient request to the local L1s, unless filtering
// is on, this is the first attempt (RetryNum == 0), and the filter shows
// no local sharers — then the broadcast is skipped entirely.
789 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
790 peek(requestNetwork_in, RequestMsg) {
791 if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
792 //profile_filter_action(1);
793 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
797 enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
798 out_msg.addr := in_msg.addr;
799 out_msg.Requestor := in_msg.Requestor;
802 // Currently assuming only one chip so all L1s are local
804 //out_msg.Destination := getLocalL1IDs(machineID);
805 out_msg.Destination.broadcast(MachineType:L1Cache);
806 out_msg.Destination.remove(in_msg.Requestor);
808 out_msg.Type := in_msg.Type;
809 out_msg.isLocal := false;
810 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
811 out_msg.AccessMode := in_msg.AccessMode;
812 out_msg.Prefetch := in_msg.Prefetch;
814 //profile_filter_action(0);
// L2 hit for a local L1 GETS: send shared data with exactly one token
// (the out_msg.Tokens := 1 line appears to be outside this view).
819 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
820 peek(L1requestNetwork_in, RequestMsg) {
821 assert(is_valid(cache_entry));
822 assert(cache_entry.Tokens > 0);
823 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
824 out_msg.addr := address;
825 out_msg.Type := CoherenceResponseType:DATA_SHARED;
826 out_msg.Sender := machineID;
827 out_msg.Destination.add(in_msg.Requestor);
828 out_msg.DataBlk := cache_entry.DataBlk;
829 out_msg.Dirty := false;
830 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
833 cache_entry.Tokens := cache_entry.Tokens - 1;
// L2 hit where we hold exactly the owner half + 1: pass ownership and all
// our tokens to the L1 reader.
837 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
838 peek(L1requestNetwork_in, RequestMsg) {
839 assert(is_valid(cache_entry));
840 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
841 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
842 out_msg.addr := address;
843 out_msg.Type := CoherenceResponseType:DATA_OWNER;
844 out_msg.Sender := machineID;
845 out_msg.Destination.add(in_msg.Requestor);
846 out_msg.DataBlk := cache_entry.DataBlk;
847 out_msg.Dirty := cache_entry.Dirty;
848 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
849 out_msg.Tokens := cache_entry.Tokens;
851 cache_entry.Tokens := 0;
// L2 hit for a local L1 GETX: hand over the data plus every token we hold.
855 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
856 peek(L1requestNetwork_in, RequestMsg) {
857 assert(is_valid(cache_entry));
858 // assert(cache_entry.Tokens == max_tokens());
859 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
860 out_msg.addr := address;
861 out_msg.Type := CoherenceResponseType:DATA_OWNER;
862 out_msg.Sender := machineID;
863 out_msg.Destination.add(in_msg.Requestor);
864 out_msg.DataBlk := cache_entry.DataBlk;
865 out_msg.Dirty := cache_entry.Dirty;
866 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
867 //out_msg.Tokens := max_tokens();
868 out_msg.Tokens := cache_entry.Tokens;
870 cache_entry.Tokens := 0;
// Queue-pop actions: dequeue the head message from each incoming port
// after it has been handled.
874 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
875 persistentNetwork_in.dequeue(clockEdge());
878 action(m_popRequestQueue, "m", desc="Pop request queue.") {
879 requestNetwork_in.dequeue(clockEdge());
882 action(n_popResponseQueue, "n", desc="Pop response queue") {
883 responseNetwork_in.dequeue(clockEdge());
886 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
887 L1requestNetwork_in.dequeue(clockEdge());
// Accumulate the tokens carried by an incoming response into the entry,
// and pick up the dirty bit from owner-carrying messages.
891 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
892 peek(responseNetwork_in, ResponseMsg) {
893 assert(is_valid(cache_entry));
894 assert(in_msg.Tokens != 0);
895 cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
897 // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
898 // may not trigger this action.
899 if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
900 cache_entry.Dirty := true;
// Record the local L1 requestor in the sharer filter: GETX marks it as the
// exclusive writer, GETS adds it as a sharer.
905 action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
906 peek(L1requestNetwork_in, RequestMsg) {
907 if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
908 if (in_msg.Type == CoherenceRequestType:GETX) {
909 setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
910 } else if (in_msg.Type == CoherenceRequestType:GETS) {
911 addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
// Drop the exclusive hint for this address in the local sharer filter.
917 action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
918 clearExclusiveBitIfExists(address);
// Touch the replacement state so an L1 hit keeps the line most-recently-used.
921 action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
922 peek(L1requestNetwork_in, RequestMsg) {
923 if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
924 (is_valid(cache_entry))) {
925 L2cache.setMRU(address);
// Respond to a transient GETX (global network) by sending every collected
// token to the requestor as a data-less ACK.
930 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
931 assert(is_valid(cache_entry));
932 if (cache_entry.Tokens > 0) {
933 peek(requestNetwork_in, RequestMsg) {
934 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
935 out_msg.addr := address;
936 out_msg.Type := CoherenceResponseType:ACK;
937 out_msg.Sender := machineID;
938 out_msg.Destination.add(in_msg.Requestor);
939 assert(cache_entry.Tokens >= 1);
940 out_msg.Tokens := cache_entry.Tokens;
941 out_msg.MessageSize := MessageSizeType:Response_Control;
945 cache_entry.Tokens := 0;
// Same as above but for a request that arrived from a local L1.
948 action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
949 assert(is_valid(cache_entry));
950 if (cache_entry.Tokens > 0) {
951 peek(L1requestNetwork_in, RequestMsg) {
952 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
953 out_msg.addr := address;
954 out_msg.Type := CoherenceResponseType:ACK;
955 out_msg.Sender := machineID;
956 out_msg.Destination.add(in_msg.Requestor);
957 assert(cache_entry.Tokens >= 1);
958 out_msg.Tokens := cache_entry.Tokens;
959 out_msg.MessageSize := MessageSizeType:Response_Control;
963 cache_entry.Tokens := 0;
// Copy the data block from the incoming response into the cache entry.
// The Dirty bit is only ever raised here (clean -> dirty when the
// response is dirty); it is never cleared by this action.
966 action(u_writeDataToCache, "u", desc="Write data to cache") {
967 peek(responseNetwork_in, ResponseMsg) {
968 assert(is_valid(cache_entry));
969 cache_entry.DataBlk := in_msg.DataBlk;
970 if ((cache_entry.Dirty == false) && in_msg.Dirty) {
971 cache_entry.Dirty := in_msg.Dirty;
// Allocate a fresh L2 entry for this address and bind it to cache_entry.
976 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
977 set_cache_entry(L2cache.allocate(address, new Entry));
// Free the L2 entry so a replacement can proceed in parallel with a fetch.
980 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
981 L2cache.deallocate(address);
// Statistics: count one demand miss at this L2 bank.
985 action(uu_profileMiss, "\um", desc="Profile the demand miss") {
986 ++L2cache.demand_misses;
// Statistics: count one demand hit at this L2 bank.
989 action(uu_profileHit, "\uh", desc="Profile the demand hit") {
990 ++L2cache.demand_hits;
// Sanity check: any data-carrying response (i.e. anything other than a
// bare ACK or a token-only writeback) must match the data we already
// hold. Catches silent data divergence in the token protocol.
993 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
994 peek(responseNetwork_in, ResponseMsg) {
995 if (in_msg.Type != CoherenceResponseType:ACK &&
996 in_msg.Type != CoherenceResponseType:WB_TOKENS) {
997 assert(is_valid(cache_entry));
998 assert(cache_entry.DataBlk == in_msg.DataBlk);
1004 //*****************************************************
1006 //*****************************************************
// Transitions valid in (almost) every base state.
// L1_INV: a local L1 invalidation hint — just update the sharer filter.
1008 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
1010 h_updateFilterFromL1HintOrWB;
// Our own persistent lock/unlock arriving while in a base state is a
// no-op beyond consuming the persistent-queue entry.
1014 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
1015 l_popPersistentQueue;
1019 // Transitions from NP
// Transient (broadcast) requests: we hold nothing, so just relay the
// request to any local sharers recorded in the filter.
1021 transition(NP, {Transient_GETX, Transient_GETS}) {
1022 // forward message to local sharers
1024 j_forwardTransientRequestToLocalSharers;
// Local L1 miss at a not-present bank: broadcast the request outward.
1029 transition(NP, {L1_GETS, L1_GETX}) {
1030 a_broadcastLocalRequest;
1033 o_popL1RequestQueue;
// Stray responses while not present are consumed without state change.
1036 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
// L1 writebacks into a not-present bank allocate an entry; the target
// state reflects what came back: shared data -> S, bare tokens -> I,
// all tokens -> M, owned data -> O.
1041 transition(NP, Writeback_Shared_Data, S) {
1042 vv_allocateL2CacheBlock;
1044 q_updateTokensFromResponse;
1045 h_updateFilterFromL1HintOrWB;
1049 transition(NP, Writeback_Tokens, I) {
1050 vv_allocateL2CacheBlock;
1051 q_updateTokensFromResponse;
1052 h_updateFilterFromL1HintOrWB;
1056 transition(NP, Writeback_All_Tokens, M) {
1057 vv_allocateL2CacheBlock;
1059 q_updateTokensFromResponse;
1060 h_updateFilterFromL1HintOrWB;
1064 transition(NP, Writeback_Owned, O) {
1065 vv_allocateL2CacheBlock;
1067 q_updateTokensFromResponse;
1068 h_updateFilterFromL1HintOrWB;
// Persistent requests while not present: nothing to contribute; pop.
// NOTE(review): the transition header line for this group is outside
// the extracted text — only the event list and body are visible here.
1074 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1076 l_popPersistentQueue;
1079 // Transitions from Idle
// Local GETS/GETX in I: forward outward, and pass along any tokens we
// have already collected so the requestor gets them immediately.
1081 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1082 a_broadcastLocalRequest;
1083 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1086 o_popL1RequestQueue;
1089 transition(I, L1_GETX) {
1090 a_broadcastLocalRequest;
1091 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1094 o_popL1RequestQueue;
// Replacement from I: no dirty data to push; clean-replace and free.
1097 transition(I, L2_Replacement) {
1098 c_cleanReplacement; // Only needed in some cases
1099 rr_deallocateL2CacheBlock;
// Transient request while idle: surrender collected tokens and relay
// the request to local sharers.
1102 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1104 t_sendAckWithCollectedTokens;
1105 j_forwardTransientRequestToLocalSharers;
// Persistent request while idle: surrender collected tokens to the
// starver. NOTE(review): header line of this group is not in view.
1110 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1112 e_sendAckWithCollectedTokens;
1113 l_popPersistentQueue;
// Token/data arrival in I climbs the state ladder:
// Ack -> stay I (more tokens), shared data -> S, owner data -> O,
// all tokens -> M. Writeback variants also refresh the L1 filter.
1117 transition(I, Ack) {
1118 q_updateTokensFromResponse;
1122 transition(I, Data_Shared, S) {
1124 q_updateTokensFromResponse;
1128 transition(I, Writeback_Shared_Data, S) {
1130 q_updateTokensFromResponse;
1131 h_updateFilterFromL1HintOrWB;
1135 transition(I, Writeback_Tokens) {
1136 q_updateTokensFromResponse;
1137 h_updateFilterFromL1HintOrWB;
1141 transition(I, Data_Owner, O) {
1143 q_updateTokensFromResponse;
1147 transition(I, Writeback_Owned, O) {
1149 q_updateTokensFromResponse;
1150 h_updateFilterFromL1HintOrWB;
1154 transition(I, Data_All_Tokens, M) {
1156 q_updateTokensFromResponse;
1161 transition(I, Writeback_All_Tokens, M) {
1163 q_updateTokensFromResponse;
1164 h_updateFilterFromL1HintOrWB;
1168 // Transitions from Shared
// Replacement from S: nothing dirty; just free the entry.
1170 transition(S, L2_Replacement, I) {
1172 rr_deallocateL2CacheBlock;
// A transient GETX strips our tokens (downgrade to I) and is also
// relayed to local sharers; transient GETS is only relayed — we keep
// our shared tokens.
1175 transition(S, Transient_GETX, I) {
1177 t_sendAckWithCollectedTokens;
1178 j_forwardTransientRequestToLocalSharers;
1182 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1183 j_forwardTransientRequestToLocalSharers;
// Persistent GETX locks us out entirely (I_L); persistent GETS lets us
// keep one token and remain a locked sharer (S_L).
1188 transition(S, Persistent_GETX, I_L) {
1189 e_sendAckWithCollectedTokens;
1190 l_popPersistentQueue;
1194 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1195 f_sendAckWithAllButOneTokens;
1196 l_popPersistentQueue;
// Incoming tokens/data while shared: accumulate tokens; data-bearing
// messages must agree with our copy (w_assert...). Owner data -> O,
// all tokens -> M. Writeback variants also refresh the L1 filter.
1200 transition(S, Ack) {
1201 q_updateTokensFromResponse;
1205 transition(S, Data_Shared) {
1206 w_assertIncomingDataAndCacheDataMatch;
1207 q_updateTokensFromResponse;
1211 transition(S, Writeback_Tokens) {
1212 q_updateTokensFromResponse;
1213 h_updateFilterFromL1HintOrWB;
1217 transition(S, Writeback_Shared_Data) {
1218 w_assertIncomingDataAndCacheDataMatch;
1219 q_updateTokensFromResponse;
1220 h_updateFilterFromL1HintOrWB;
1225 transition(S, Data_Owner, O) {
1226 w_assertIncomingDataAndCacheDataMatch;
1227 q_updateTokensFromResponse;
1231 transition(S, Writeback_Owned, O) {
1232 w_assertIncomingDataAndCacheDataMatch;
1233 q_updateTokensFromResponse;
1234 h_updateFilterFromL1HintOrWB;
1238 transition(S, Data_All_Tokens, M) {
1239 w_assertIncomingDataAndCacheDataMatch;
1240 q_updateTokensFromResponse;
1244 transition(S, Writeback_All_Tokens, M) {
1245 w_assertIncomingDataAndCacheDataMatch;
1246 q_updateTokensFromResponse;
1247 h_updateFilterFromL1HintOrWB;
// Local L1 GETX from S: broadcast for more tokens, give the L1 what we
// have, and drop to I.
1251 transition(S, L1_GETX, I) {
1252 a_broadcastLocalRequest;
1253 tt_sendLocalAckWithCollectedTokens;
1257 o_popL1RequestQueue;
// Local L1 GETS from S: satisfy directly from our copy. If it takes
// our last token, we fall to I.
1261 transition(S, L1_GETS) {
1262 k_dataFromL2CacheToL1Requestor;
1266 o_popL1RequestQueue;
1269 transition(S, L1_GETS_Last_Token, I) {
1271 k_dataFromL2CacheToL1Requestor;
1275 o_popL1RequestQueue;
1278 // Transitions from Owned
// Replacement from O: data may be dirty — push it out before freeing.
1280 transition(O, L2_Replacement, I) {
1281 cc_dirtyReplacement;
1282 rr_deallocateL2CacheBlock;
// Transient GETX: hand over data plus every token, relay to local
// sharers, and drop to I.
1285 transition(O, Transient_GETX, I) {
1287 dd_sendDataWithAllTokens;
1288 j_forwardTransientRequestToLocalSharers;
// Persistent requests: GETX and GETS-last-token take data + all tokens
// (-> I_L); a plain GETS takes data but leaves us one token (-> S_L).
1292 transition(O, Persistent_GETX, I_L) {
1293 ee_sendDataWithAllTokens;
1294 l_popPersistentQueue;
1297 transition(O, Persistent_GETS, S_L) {
1298 ff_sendDataWithAllButOneTokens;
1299 l_popPersistentQueue;
1302 transition(O, Persistent_GETS_Last_Token, I_L) {
1303 fa_sendDataWithAllTokens;
1304 l_popPersistentQueue;
// Transient GETS: share data with some tokens but retain ownership.
1307 transition(O, Transient_GETS) {
1308 // send multiple tokens
1310 d_sendDataWithTokens;
// A transient GETS asking for our last token is ignored — the
// requestor must escalate to a persistent request.
1314 transition(O, Transient_GETS_Last_Token) {
1315 // WAIT FOR IT TO GO PERSISTENT
// Token arrivals while owned; collecting every token promotes O -> M.
1320 transition(O, Ack) {
1321 q_updateTokensFromResponse;
1325 transition(O, Ack_All_Tokens, M) {
1326 q_updateTokensFromResponse;
1330 transition(O, Data_Shared) {
1331 w_assertIncomingDataAndCacheDataMatch;
1332 q_updateTokensFromResponse;
1337 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1338 w_assertIncomingDataAndCacheDataMatch;
1339 q_updateTokensFromResponse;
1340 h_updateFilterFromL1HintOrWB;
1344 transition(O, Data_All_Tokens, M) {
1345 w_assertIncomingDataAndCacheDataMatch;
1346 q_updateTokensFromResponse;
1350 transition(O, Writeback_All_Tokens, M) {
1351 w_assertIncomingDataAndCacheDataMatch;
1352 q_updateTokensFromResponse;
1353 h_updateFilterFromL1HintOrWB;
// Local L1 requests served from the owned copy. GETS keeps us in O
// unless it takes the last token; GETX takes data + all tokens -> I.
1357 transition(O, L1_GETS) {
1358 k_dataFromL2CacheToL1Requestor;
1362 o_popL1RequestQueue;
1365 transition(O, L1_GETS_Last_Token, I) {
1366 k_dataOwnerFromL2CacheToL1Requestor;
1370 o_popL1RequestQueue;
1373 transition(O, L1_GETX, I) {
1374 a_broadcastLocalRequest;
1375 k_dataAndAllTokensFromL2CacheToL1Requestor;
1379 o_popL1RequestQueue;
1382 // Transitions from M
// Replacement from M: data is (potentially) dirty — write it back.
1384 transition(M, L2_Replacement, I) {
1385 cc_dirtyReplacement;
1386 rr_deallocateL2CacheBlock;
1389 // MRM_DEBUG: Give up all tokens even for GETS? ???
// Any transient request takes data + all tokens; we drop to I.
1390 transition(M, {Transient_GETX, Transient_GETS}, I) {
1392 dd_sendDataWithAllTokens;
// Persistent requests likewise take everything; we become locked (I_L).
1396 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1397 ee_sendDataWithAllTokens;
1398 l_popPersistentQueue;
// Local L1 GETS: share data, keep ownership -> O. Local L1 GETX: hand
// over data + all tokens -> I.
1402 transition(M, L1_GETS, O) {
1403 k_dataFromL2CacheToL1Requestor;
1407 o_popL1RequestQueue;
1410 transition(M, L1_GETX, I) {
1411 k_dataAndAllTokensFromL2CacheToL1Requestor;
1415 o_popL1RequestQueue;
1419 //Transitions from locked states
// While a persistent request has this block locked, incoming responses
// and writebacks are not kept here — they are bounced on to the
// starving (persistent) requestor.
1421 transition({I_L, S_L}, Ack) {
1422 gg_bounceResponseToStarver;
1426 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1427 gg_bounceResponseToStarver;
1431 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1432 gg_bounceWBSharedToStarver;
1433 h_updateFilterFromL1HintOrWB;
1437 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1438 gg_bounceWBOwnedToStarver;
1439 h_updateFilterFromL1HintOrWB;
// Replacement while locked just frees the entry (no data to push).
1443 transition(S_L, L2_Replacement, I) {
1445 rr_deallocateL2CacheBlock;
1448 transition(I_L, L2_Replacement, I) {
1449 rr_deallocateL2CacheBlock;
// Our own lock being released returns us to the matching base state.
1452 transition(I_L, Own_Lock_or_Unlock, I) {
1453 l_popPersistentQueue;
1456 transition(S_L, Own_Lock_or_Unlock, S) {
1457 l_popPersistentQueue;
// Transient requests are ignored while locked — the persistent request
// has priority.
1460 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
// Local requests while fully locked (I_L) can only be broadcast onward.
1465 transition(I_L, {L1_GETX, L1_GETS}) {
1466 a_broadcastLocalRequest;
1469 o_popL1RequestQueue;
// S_L keeps one token: a local GETX broadcasts and gives up our tokens
// (-> I_L); a local GETS can still be served from our shared copy,
// dropping to I_L if it takes the last token.
1472 transition(S_L, L1_GETX, I_L) {
1473 a_broadcastLocalRequest;
1474 tt_sendLocalAckWithCollectedTokens;
1478 o_popL1RequestQueue;
1481 transition(S_L, L1_GETS) {
1482 k_dataFromL2CacheToL1Requestor;
1486 o_popL1RequestQueue;
1489 transition(S_L, L1_GETS_Last_Token, I_L) {
1490 k_dataFromL2CacheToL1Requestor;
1494 o_popL1RequestQueue;
// Further persistent requests while already locked: a GETX in S_L
// takes our remaining tokens; otherwise just consume the queue entry.
1497 transition(S_L, Persistent_GETX, I_L) {
1498 e_sendAckWithCollectedTokens;
1499 l_popPersistentQueue;
1502 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1503 l_popPersistentQueue;
1506 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1507 l_popPersistentQueue;