3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// L2 cache bank controller for the Token coherence protocol.
// Configuration parameters: enqueue latencies for requests/responses this
// bank issues, plus a switch that enables filtering of external transient
// requests using the local sharer directory (see
// j_forwardTransientRequestToLocalSharers).
35 machine(L2Cache, "Token protocol")
36 : int l2_request_latency,
37 int l2_response_latency,
39 bool filtering_enabled
43 // From local bank of L2 cache TO the network
45 // this L2 bank -> a local L1 || mod-directory
46 MessageBuffer responseFromL2Cache, network="To", virtual_network="1", ordered="false";
47 // this L2 bank -> mod-directory
48 MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="3", ordered="false";
49 // this L2 bank -> a local L1
50 MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="4", ordered="false";
53 // FROM the network to this local bank of L2 cache
55 // a local L1 || mod-directory -> this L2 bank
56 MessageBuffer responseToL2Cache, network="From", virtual_network="1", ordered="false";
// NOTE: the persistent-request network is the only ordered virtual network;
// persistent lock/unlock messages must be observed in issue order.
57 MessageBuffer persistentToL2Cache, network="From", virtual_network="2", ordered="true";
58 // mod-directory -> this L2 bank
59 MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="3", ordered="false";
60 // a local L1 -> this L2 bank
61 MessageBuffer L1RequestToL2Cache, network="From", virtual_network="4", ordered="false";
// Stable states for an L2 line. The *_L states model lines "Locked" by an
// active persistent request from another processor (starvation avoidance).
64 enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
66 NP, desc="Not Present";
68 S, desc="Shared, not present in any local L1s";
69 O, desc="Owned, not present in any L1s";
70 M, desc="Modified, not present in any L1s";
73 I_L, "I^L", desc="Invalid, Locked";
74 S_L, "S^L", desc="Shared, Locked";
// Events that drive the L2 state machine. "Last_Token" variants fire when
// this bank holds exactly one token for the line (see the in_ports), since
// giving up the final token changes what response type must be sent.
78 enumeration(Event, desc="Cache events") {
81 L1_GETS, desc="local L1 GETS request";
82 L1_GETS_Last_Token, desc="local L1 GETS request";
83 L1_GETX, desc="local L1 GETX request";
84 L1_INV, desc="L1 no longer has tokens";
85 Transient_GETX, desc="A GetX from another processor";
86 Transient_GETS, desc="A GetS from another processor";
87 Transient_GETS_Last_Token, desc="A GetS from another processor";
89 // events initiated by this L2
90 L2_Replacement, desc="L2 Replacement", format="!r";
92 // events of external L2 responses
// Writeback_* distinguish what the evicting L1 handed us: tokens only,
// clean shared data, everything (all tokens), or owned (possibly dirty) data.
95 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
96 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
97 Writeback_All_Tokens, desc="Received a writeback from L1";
98 Writeback_Owned, desc="Received a writeback from L1";
101 Data_Shared, desc="Received a data message, we are now a sharer";
102 Data_Owner, desc="Received a data message, we are now the owner";
103 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
104 Ack, desc="Received an ack message";
105 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
// Persistent (starvation-avoidance) events from the persistent network.
108 Persistent_GETX, desc="Another processor has priority to read/write";
109 Persistent_GETS, desc="Another processor has priority to read";
110 Own_Lock_or_Unlock, desc="This processor now has priority";
// Per-line cache entry: coherence state, dirty bit, the token count this
// bank currently holds for the line, and the data block itself.
116 structure(Entry, desc="...", interface="AbstractCacheEntry") {
117 State CacheState, desc="cache state";
118 bool Dirty, desc="Is the data dirty (different than memory)?";
119 int Tokens, desc="The number of tokens we're holding for the line";
120 DataBlk DataBlk, desc="data for the block";
// Local-directory entry tracking which local L1s likely share the block;
// used only as a filter/hint (see filtering_enabled), not for correctness.
123 structure(DirEntry, desc="...") {
124 Set Sharers, desc="Set of the internal processors that want the block in shared state";
125 bool exclusive, default="false", desc="if local exclusive is likely";
// Interfaces to C++-implemented structures (declared external to SLICC).
// CacheMemory: the backing L2 cache array (tag lookup, allocate, victim probe).
128 external_type(CacheMemory) {
129 bool cacheAvail(Address);
130 Address cacheProbe(Address);
131 void allocate(Address, Entry);
132 void deallocate(Address);
133 Entry lookup(Address);
134 void changePermission(Address, AccessPermission);
135 bool isTagPresent(Address);
136 void setMRU(Address);
// PerfectCacheMemory: unbounded map used for the local sharer directory.
139 external_type(PerfectCacheMemory) {
140 void allocate(Address);
141 void deallocate(Address);
142 DirEntry lookup(Address);
143 bool isTagPresent(Address);
// PersistentTable: tracks outstanding persistent (starvation) requests and
// which requestor currently has priority for each address.
146 external_type(PersistentTable) {
147 void persistentRequestLock(Address, MachineID, AccessType);
148 void persistentRequestUnlock(Address, MachineID);
149 MachineID findSmallest(Address);
150 AccessType typeOfSmallest(Address);
151 void markEntries(Address);
152 bool isLocked(Address);
153 int countStarvingForAddress(Address);
154 int countReadStarvingForAddress(Address);
// Controller-owned structures: the cache array, the persistent-request
// table, and the local sharer directory (template_hack supplies the C++
// template argument for the generated code).
157 CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
159 PersistentTable persistentTable;
160 PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
// Return (by reference) the cache entry for addr.
// NOTE(review): both paths return L2cacheMemory[addr]; the tag-present
// check adds no behavior here — callers are expected to ensure presence.
162 Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
163 if (L2cacheMemory.isTagPresent(addr)) {
164 return L2cacheMemory[addr];
167 return L2cacheMemory[addr];
// Token count held for addr; the fallthrough for an absent tag is outside
// the visible lines (presumably returns 0 — confirm in full source).
170 int getTokens(Address addr) {
171 if (L2cacheMemory.isTagPresent(addr)) {
172 return L2cacheMemory[addr].Tokens;
// Forward a permission change to the cache array only if the line is present.
178 void changePermission(Address addr, AccessPermission permission) {
179 if (L2cacheMemory.isTagPresent(addr)) {
180 return L2cacheMemory.changePermission(addr, permission);
184 bool isCacheTagPresent(Address addr) {
185 return (L2cacheMemory.isTagPresent(addr) );
// Current state: the cached state when present; when absent but the address
// is persistently locked, the locked branch applies (body continues past
// the visible lines).
188 State getState(Address addr) {
189 if (isCacheTagPresent(addr)) {
190 return getL2CacheEntry(addr).CacheState;
191 } else if (persistentTable.isLocked(addr) == true) {
198 string getStateStr(Address addr) {
199 return L2Cache_State_to_string(getState(addr));
// Transition the line to 'state', asserting the token-count invariants of
// the Token protocol, then derive the access permission from the new state.
202 void setState(Address addr, State state) {
205 if (isCacheTagPresent(addr)) {
206 // Make sure the token count is in range
207 assert(getL2CacheEntry(addr).Tokens >= 0);
208 assert(getL2CacheEntry(addr).Tokens <= max_tokens());
210 // Make sure we have no tokens in L
211 if ((state == State:I_L) ) {
212 if (isCacheTagPresent(addr)) {
213 assert(getL2CacheEntry(addr).Tokens == 0);
217 // in M and E you have all the tokens
218 if (state == State:M ) {
219 assert(getL2CacheEntry(addr).Tokens == max_tokens());
222 // in NP you have no tokens
223 if (state == State:NP) {
224 assert(getL2CacheEntry(addr).Tokens == 0);
227 // You have at least one token in S-like states
228 if (state == State:S ) {
229 assert(getL2CacheEntry(addr).Tokens > 0);
232 // You have at least half the token in O-like states
233 if (state == State:O ) {
234 assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
235 // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
238 getL2CacheEntry(addr).CacheState := state;
// Map coherence state to access permission: M is read-write, S/O read-only,
// everything else (including locked states) invalid.
241 if (state == State:I) {
242 changePermission(addr, AccessPermission:Invalid);
243 } else if (state == State:S || state == State:O ) {
244 changePermission(addr, AccessPermission:Read_Only);
245 } else if (state == State:M ) {
246 changePermission(addr, AccessPermission:Read_Write);
248 changePermission(addr, AccessPermission:Invalid);
// Remove a local L1 from the sharer set; free the directory entry once the
// set becomes empty so the filter stays compact.
253 void removeSharer(Address addr, NodeID id) {
255 if (localDirectory.isTagPresent(addr)) {
256 localDirectory[addr].Sharers.remove(id);
257 if (localDirectory[addr].Sharers.count() == 0) {
258 localDirectory.deallocate(addr);
// True if any local L1 is recorded as a sharer (fallthrough for the
// absent/empty cases is outside the visible lines).
263 bool sharersExist(Address addr) {
264 if (localDirectory.isTagPresent(addr)) {
265 if (localDirectory[addr].Sharers.count() > 0) {
// True if a local L1 likely holds the line exclusively.
277 bool exclusiveExists(Address addr) {
278 if (localDirectory.isTagPresent(addr)) {
279 if (localDirectory[addr].exclusive == true) {
291 // assumes that caller will check to make sure tag is present
292 Set getSharers(Address addr) {
293 return localDirectory[addr].Sharers;
// Record id as the sole (exclusive) writer: reset the sharer set to {id}.
296 void setNewWriter(Address addr, NodeID id) {
297 if (localDirectory.isTagPresent(addr) == false) {
298 localDirectory.allocate(addr);
300 localDirectory[addr].Sharers.clear();
301 localDirectory[addr].Sharers.add(id);
302 localDirectory[addr].exclusive := true;
// Add id to the sharer set; deliberately does NOT clear the exclusive hint.
305 void addNewSharer(Address addr, NodeID id) {
306 if (localDirectory.isTagPresent(addr) == false) {
307 localDirectory.allocate(addr);
309 localDirectory[addr].Sharers.add(id);
310 // localDirectory[addr].exclusive := false;
313 void clearExclusiveBitIfExists(Address addr) {
314 if (localDirectory.isTagPresent(addr) == true) {
315 localDirectory[addr].exclusive := false;
// Outgoing ports: global requests to the directory, local requests to L1s,
// and responses (to either), bound to the MessageBuffers declared above.
320 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
321 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
322 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
328 // Persistent Network
// First update the persistent table from the lock/unlock message, then
// trigger an event based on the table's resulting view of the address:
// locked by a reader/writer, or owned/unlocked by us.
329 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
330 if (persistentNetwork_in.isReady()) {
331 peek(persistentNetwork_in, PersistentMsg) {
332 assert(in_msg.Destination.isElement(machineID));
334 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
335 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
336 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
337 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
338 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
339 persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
341 error("Unexpected message");
344 // React to the message based on the current state of the table
345 if (persistentTable.isLocked(in_msg.Address)) {
// typeOfSmallest: the access type of the highest-priority starving requestor.
347 if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
348 trigger(Event:Persistent_GETS, in_msg.Address);
350 trigger(Event:Persistent_GETX, in_msg.Address);
354 trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
// Transient (non-persistent) requests arriving from the global network.
// A GETS while we hold exactly one token is special-cased: handing over the
// last token requires a different response (Last_Token event).
362 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
363 if (requestNetwork_in.isReady()) {
364 peek(requestNetwork_in, RequestMsg) {
365 assert(in_msg.Destination.isElement(machineID));
367 if (in_msg.Type == CoherenceRequestType:GETX) {
368 trigger(Event:Transient_GETX, in_msg.Address);
369 } else if (in_msg.Type == CoherenceRequestType:GETS) {
370 if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
371 trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
374 trigger(Event:Transient_GETS, in_msg.Address);
377 error("Unexpected message");
// Requests from local L1 caches; mirrors the global request port, including
// the single-remaining-token special case for GETS.
383 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
384 if (L1requestNetwork_in.isReady()) {
385 peek(L1requestNetwork_in, RequestMsg) {
386 assert(in_msg.Destination.isElement(machineID));
387 if (in_msg.Type == CoherenceRequestType:GETX) {
388 trigger(Event:L1_GETX, in_msg.Address);
389 } else if (in_msg.Type == CoherenceRequestType:GETS) {
390 if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
391 trigger(Event:L1_GETS_Last_Token, in_msg.Address);
394 trigger(Event:L1_GETS, in_msg.Address);
397 error("Unexpected message");
// Responses (data/acks/writebacks) arriving at this bank. The port splits
// on whether accepting this message's tokens would give us ALL tokens for
// the line: if not, plain Ack/Data/Writeback events fire; if so, the
// *_All_Tokens variants fire instead. Writebacks additionally require cache
// space — when neither room nor the tag is available, an L2_Replacement is
// triggered on the victim chosen by cacheProbe before the writeback can be
// accepted.
405 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
406 if (responseNetwork_in.isReady()) {
407 peek(responseNetwork_in, ResponseMsg) {
408 assert(in_msg.Destination.isElement(machineID));
// Case 1: message does NOT complete the token set.
409 if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
410 if (in_msg.Type == CoherenceResponseType:ACK) {
411 trigger(Event:Ack, in_msg.Address);
412 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
413 trigger(Event:Data_Owner, in_msg.Address);
414 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
415 trigger(Event:Data_Shared, in_msg.Address);
416 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
418 if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
420 // either room is available or the block is already present
422 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
423 assert(in_msg.Dirty == false);
424 trigger(Event:Writeback_Tokens, in_msg.Address);
425 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
426 assert(in_msg.Dirty == false);
427 trigger(Event:Writeback_Shared_Data, in_msg.Address);
429 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
430 //assert(in_msg.Dirty == false);
431 trigger(Event:Writeback_Owned, in_msg.Address);
// No room: evict a victim first; the writeback stays queued meanwhile.
435 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
437 } else if (in_msg.Type == CoherenceResponseType:INV) {
438 trigger(Event:L1_INV, in_msg.Address);
440 error("Unexpected message");
// Case 2: accepting this message WOULD complete the token set — use the
// *_All_Tokens events so the line can move to an owner/modified state.
443 if (in_msg.Type == CoherenceResponseType:ACK) {
444 trigger(Event:Ack_All_Tokens, in_msg.Address);
445 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
446 trigger(Event:Data_All_Tokens, in_msg.Address);
447 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
448 if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
450 // either room is available or the block is already present
452 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
453 assert(in_msg.Dirty == false);
// A tokens-only writeback can only complete the set if we already hold
// data for the line, hence the state assertion.
454 assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
455 trigger(Event:Writeback_All_Tokens, in_msg.Address);
456 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
457 assert(in_msg.Dirty == false);
458 trigger(Event:Writeback_All_Tokens, in_msg.Address);
460 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
461 trigger(Event:Writeback_All_Tokens, in_msg.Address);
465 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
467 } else if (in_msg.Type == CoherenceResponseType:INV) {
468 trigger(Event:L1_INV, in_msg.Address);
470 DEBUG_EXPR(in_msg.Type);
471 error("Unexpected message");
// Forward a local L1 request to the directory, copying the request fields
// through. Despite the "broadcast" name, the visible code sends only to the
// address's home directory — the true-broadcast variants are commented out.
481 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
483 peek(L1requestNetwork_in, RequestMsg) {
485 // if this is a retry or no local sharers, broadcast normally
487 // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
488 enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
489 out_msg.Address := in_msg.Address;
490 out_msg.Type := in_msg.Type;
491 out_msg.Requestor := in_msg.Requestor;
492 out_msg.RequestorMachine := in_msg.RequestorMachine;
493 out_msg.RetryNum := in_msg.RetryNum;
496 // If a statically shared L2 cache, then no other L2 caches can
499 //out_msg.Destination.broadcast(MachineType:L2Cache);
500 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
501 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
503 out_msg.Destination.add(map_Address_to_Directory(address));
504 out_msg.MessageSize := MessageSizeType:Request_Control;
505 out_msg.AccessMode := in_msg.AccessMode;
506 out_msg.Prefetch := in_msg.Prefetch;
510 //profile_filter_action(0);
// Forward an incoming response unchanged to memory (the home directory),
// e.g. when this bank cannot or will not keep the tokens/data itself.
515 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
516 peek(responseNetwork_in, ResponseMsg) {
517 // FIXME, should use a 3rd vnet
518 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
519 out_msg.Address := address;
520 out_msg.Type := in_msg.Type;
521 out_msg.Sender := machineID;
522 out_msg.SenderMachine := MachineType:L2Cache;
523 out_msg.Destination.add(map_Address_to_Directory(address));
524 out_msg.Tokens := in_msg.Tokens;
525 out_msg.MessageSize := in_msg.MessageSize;
526 out_msg.DataBlk := in_msg.DataBlk;
527 out_msg.Dirty := in_msg.Dirty;
// Replace a clean line: return any held tokens to the directory as an ACK
// (no data needed), then zero the local token count.
532 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
533 if (getL2CacheEntry(address).Tokens > 0) {
534 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
535 out_msg.Address := address;
536 out_msg.Type := CoherenceResponseType:ACK;
537 out_msg.Sender := machineID;
538 out_msg.SenderMachine := MachineType:L2Cache;
539 out_msg.Destination.add(map_Address_to_Directory(address));
540 out_msg.Tokens := getL2CacheEntry(address).Tokens;
541 out_msg.MessageSize := MessageSizeType:Writeback_Control;
543 getL2CacheEntry(address).Tokens := 0;
// Replace an owned line: send tokens (and data iff dirty) to the directory.
// Dirty lines go as DATA_OWNER with the block; clean owned lines go as
// ACK_OWNER, letting memory keep its copy.
547 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
548 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
549 out_msg.Address := address;
550 out_msg.Sender := machineID;
551 out_msg.SenderMachine := MachineType:L2Cache;
552 out_msg.Destination.add(map_Address_to_Directory(address));
553 out_msg.Tokens := getL2CacheEntry(address).Tokens;
554 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
555 out_msg.Dirty := getL2CacheEntry(address).Dirty;
557 if (getL2CacheEntry(address).Dirty) {
558 out_msg.MessageSize := MessageSizeType:Writeback_Data;
559 out_msg.Type := CoherenceResponseType:DATA_OWNER;
561 out_msg.MessageSize := MessageSizeType:Writeback_Control;
562 out_msg.Type := CoherenceResponseType:ACK_OWNER;
565 getL2CacheEntry(address).Tokens := 0;
// Answer a transient GETS with shared data. If we hold more than N_tokens,
// hand over N_tokens; otherwise hand over a single token (the second
// enqueue omits out_msg.Tokens here — its value in the one-token path is
// set outside the visible lines; confirm in full source).
568 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
569 peek(requestNetwork_in, RequestMsg) {
570 if (getL2CacheEntry(address).Tokens > N_tokens) {
571 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
572 out_msg.Address := address;
573 out_msg.Type := CoherenceResponseType:DATA_SHARED;
574 out_msg.Sender := machineID;
575 out_msg.SenderMachine := MachineType:L2Cache;
576 out_msg.Destination.add(in_msg.Requestor);
577 out_msg.Tokens := N_tokens;
578 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
// Shared data is by definition clean.
579 out_msg.Dirty := false;
580 out_msg.MessageSize := MessageSizeType:Response_Data;
582 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
585 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
586 out_msg.Address := address;
587 out_msg.Type := CoherenceResponseType:DATA_SHARED;
588 out_msg.Sender := machineID;
589 out_msg.SenderMachine := MachineType:L2Cache;
590 out_msg.Destination.add(in_msg.Requestor);
592 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
593 out_msg.Dirty := false;
594 out_msg.MessageSize := MessageSizeType:Response_Data;
596 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
// Answer a transient GETX: transfer data plus every token we hold (making
// the requestor the owner), then zero our count.
601 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
602 peek(requestNetwork_in, RequestMsg) {
603 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
604 out_msg.Address := address;
605 out_msg.Type := CoherenceResponseType:DATA_OWNER;
606 out_msg.Sender := machineID;
607 out_msg.SenderMachine := MachineType:L2Cache;
608 out_msg.Destination.add(in_msg.Requestor);
609 assert(getL2CacheEntry(address).Tokens >= 1);
610 out_msg.Tokens := getL2CacheEntry(address).Tokens;
611 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
612 out_msg.Dirty := getL2CacheEntry(address).Dirty;
613 out_msg.MessageSize := MessageSizeType:Response_Data;
616 getL2CacheEntry(address).Tokens := 0;
// Persistent-request service: surrender all held tokens (no data) to the
// highest-priority starving requestor, if we hold any.
619 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
620 if (getL2CacheEntry(address).Tokens > 0) {
621 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
622 out_msg.Address := address;
623 out_msg.Type := CoherenceResponseType:ACK;
624 out_msg.Sender := machineID;
625 out_msg.SenderMachine := MachineType:L2Cache;
626 out_msg.Destination.add(persistentTable.findSmallest(address));
627 assert(getL2CacheEntry(address).Tokens >= 1);
628 out_msg.Tokens := getL2CacheEntry(address).Tokens;
629 out_msg.MessageSize := MessageSizeType:Response_Control;
632 getL2CacheEntry(address).Tokens := 0;
// Persistent-request service: surrender data and ALL tokens to the starver
// (used when we are the owner and must give up ownership).
635 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
636 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
637 out_msg.Address := address;
638 out_msg.Type := CoherenceResponseType:DATA_OWNER;
639 out_msg.Sender := machineID;
640 out_msg.SenderMachine := MachineType:L2Cache;
641 out_msg.Destination.add(persistentTable.findSmallest(address));
642 assert(getL2CacheEntry(address).Tokens >= 1);
643 out_msg.Tokens := getL2CacheEntry(address).Tokens;
644 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
645 out_msg.Dirty := getL2CacheEntry(address).Dirty;
646 out_msg.MessageSize := MessageSizeType:Response_Data;
648 getL2CacheEntry(address).Tokens := 0;
// Persistent GETS service while keeping read permission: give the starver
// all tokens except one, so this bank can stay a sharer. Only fires the
// send when we actually hold more than one token; the count always ends at 1.
651 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
652 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
653 assert(getL2CacheEntry(address).Tokens > 0);
654 if (getL2CacheEntry(address).Tokens > 1) {
655 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
656 out_msg.Address := address;
657 out_msg.Type := CoherenceResponseType:ACK;
658 out_msg.Sender := machineID;
659 out_msg.SenderMachine := MachineType:L2Cache;
660 out_msg.Destination.add(persistentTable.findSmallest(address));
661 assert(getL2CacheEntry(address).Tokens >= 1);
662 out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
663 out_msg.MessageSize := MessageSizeType:Response_Control;
666 getL2CacheEntry(address).Tokens := 1;
// Like f_, but as the owner: transfer ownership (DATA_OWNER) plus all but
// one token to the starver, keeping a single token locally.
669 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
670 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
671 assert(getL2CacheEntry(address).Tokens > 0);
672 if (getL2CacheEntry(address).Tokens > 1) {
673 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
674 out_msg.Address := address;
675 out_msg.Type := CoherenceResponseType:DATA_OWNER;
676 out_msg.Sender := machineID;
677 out_msg.SenderMachine := MachineType:L2Cache;
678 out_msg.Destination.add(persistentTable.findSmallest(address));
679 assert(getL2CacheEntry(address).Tokens >= 1);
680 out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
681 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
682 out_msg.Dirty := getL2CacheEntry(address).Dirty;
683 out_msg.MessageSize := MessageSizeType:Response_Data;
685 getL2CacheEntry(address).Tokens := 1;
// Redirect an incoming response verbatim to the starving requestor instead
// of absorbing it (used while the line is persistently locked by another).
691 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
692 // assert(persistentTable.isLocked(address));
693 peek(responseNetwork_in, ResponseMsg) {
694 // FIXME, should use a 3rd vnet in some cases
695 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
696 out_msg.Address := address;
697 out_msg.Type := in_msg.Type;
698 out_msg.Sender := machineID;
699 out_msg.SenderMachine := MachineType:L2Cache;
700 out_msg.Destination.add(persistentTable.findSmallest(address));
701 out_msg.Tokens := in_msg.Tokens;
702 out_msg.DataBlk := in_msg.DataBlk;
703 out_msg.Dirty := in_msg.Dirty;
704 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an L1 writeback to the starver, translating the writeback type
// to a response type: WB_SHARED_DATA becomes DATA_SHARED; a tokens-only
// writeback becomes a plain ACK.
709 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
710 //assert(persistentTable.isLocked(address));
711 peek(responseNetwork_in, ResponseMsg) {
712 // FIXME, should use a 3rd vnet in some cases
713 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
714 out_msg.Address := address;
715 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
716 out_msg.Type := CoherenceResponseType:DATA_SHARED;
718 out_msg.Type := CoherenceResponseType:ACK;
720 out_msg.Sender := machineID;
721 out_msg.SenderMachine := MachineType:L2Cache;
722 out_msg.Destination.add(persistentTable.findSmallest(address));
723 out_msg.Tokens := in_msg.Tokens;
724 out_msg.DataBlk := in_msg.DataBlk;
725 out_msg.Dirty := in_msg.Dirty;
726 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an owned (WB_OWNED) writeback to the starver as a DATA_OWNER
// response, passing ownership straight through this bank.
731 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
732 // assert(persistentTable.isLocked(address));
733 peek(responseNetwork_in, ResponseMsg) {
734 // FIXME, should use a 3rd vnet in some cases
735 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
736 out_msg.Address := address;
737 out_msg.Type := CoherenceResponseType:DATA_OWNER;
738 out_msg.Sender := machineID;
739 out_msg.SenderMachine := MachineType:L2Cache;
740 out_msg.Destination.add(persistentTable.findSmallest(address));
741 out_msg.Tokens := in_msg.Tokens;
742 out_msg.DataBlk := in_msg.DataBlk;
743 out_msg.Dirty := in_msg.Dirty;
744 out_msg.MessageSize := in_msg.MessageSize;
// A writeback/INV hint from an L1 means that L1 no longer holds the line;
// drop it from the local sharer filter.
750 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
751 peek(responseNetwork_in, ResponseMsg) {
752 removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
// Forward an external transient request down to local L1s. When filtering
// is enabled, a first-try (RetryNum == 0) request is suppressed entirely if
// the sharer filter says no local L1 holds the line — this is safe because
// transient requests in the Token protocol may be dropped (retries and
// persistent requests guarantee eventual progress).
756 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
757 peek(requestNetwork_in, RequestMsg) {
758 if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
759 //profile_filter_action(1);
760 DEBUG_EXPR("filtered message");
761 DEBUG_EXPR(in_msg.RetryNum);
764 enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
765 out_msg.Address := in_msg.Address;
766 out_msg.Requestor := in_msg.Requestor;
767 out_msg.RequestorMachine := in_msg.RequestorMachine;
770 // Currently assuming only one chip so all L1s are local
772 //out_msg.Destination := getLocalL1IDs(machineID);
773 out_msg.Destination.broadcast(MachineType:L1Cache);
774 out_msg.Destination.remove(in_msg.Requestor);
776 out_msg.Type := in_msg.Type;
777 out_msg.isLocal := false;
778 out_msg.MessageSize := MessageSizeType:Request_Control;
779 out_msg.AccessMode := in_msg.AccessMode;
780 out_msg.Prefetch := in_msg.Prefetch;
782 //profile_filter_action(0);
// L2 hit for a local L1 GETS: send clean shared data with exactly one
// token, decrementing our local count.
788 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
789 peek(L1requestNetwork_in, RequestMsg) {
790 assert(getL2CacheEntry(address).Tokens > 0);
791 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
792 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
793 out_msg.Address := address;
794 out_msg.Type := CoherenceResponseType:DATA_SHARED;
795 out_msg.Sender := machineID;
796 out_msg.SenderMachine := MachineType:L2Cache;
797 out_msg.Destination.add(in_msg.Requestor);
798 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
799 out_msg.Dirty := false;
800 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
803 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
// L2 hit where ownership moves to the L1: send DATA_OWNER carrying the
// line's dirty status with one token; we keep the remaining tokens.
807 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
808 peek(L1requestNetwork_in, RequestMsg) {
809 assert(getL2CacheEntry(address).Tokens > 0);
810 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
811 out_msg.Address := address;
812 out_msg.Type := CoherenceResponseType:DATA_OWNER;
813 out_msg.Sender := machineID;
814 out_msg.SenderMachine := MachineType:L2Cache;
815 out_msg.Destination.add(in_msg.Requestor);
816 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
817 out_msg.Dirty := getL2CacheEntry(address).Dirty;
818 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
821 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
// L2 hit for a local L1 GETX: hand over the data plus every token we hold
// (not necessarily max_tokens — the commented assert was relaxed), leaving
// this bank with none.
825 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
826 peek(L1requestNetwork_in, RequestMsg) {
827 // assert(getL2CacheEntry(address).Tokens == max_tokens());
828 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
829 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
830 out_msg.Address := address;
831 out_msg.Type := CoherenceResponseType:DATA_OWNER;
832 out_msg.Sender := machineID;
833 out_msg.SenderMachine := MachineType:L2Cache;
834 out_msg.Destination.add(in_msg.Requestor);
835 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
836 out_msg.Dirty := getL2CacheEntry(address).Dirty;
837 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
838 //out_msg.Tokens := max_tokens();
839 out_msg.Tokens := getL2CacheEntry(address).Tokens;
841 getL2CacheEntry(address).Tokens := 0;
// Queue-management actions: each dequeues the head message of one in_port
// after the triggering transition has consumed it.
845 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
846 persistentNetwork_in.dequeue();
849 action(m_popRequestQueue, "m", desc="Pop request queue.") {
850 requestNetwork_in.dequeue();
853 action(n_popResponseQueue, "n", desc="Pop response queue") {
854 responseNetwork_in.dequeue();
857 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
858 L1requestNetwork_in.dequeue();
// Absorb the tokens carried by the incoming response into our count, and
// latch the dirty bit for owner-transferring messages (done here rather
// than u_writeDataToCache because Writeback_All_Tokens may skip that action).
862 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
863 peek(responseNetwork_in, ResponseMsg) {
864 assert(in_msg.Tokens != 0);
865 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
867 // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
868 // may not trigger this action.
869 if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
870 getL2CacheEntry(address).Dirty := true;
// Update the local sharer filter from an L1 request: GETX records the L1 as
// the new exclusive writer, GETS adds it as a sharer. Requests originating
// from non-L1 machines are ignored.
875 action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
876 peek(L1requestNetwork_in, RequestMsg) {
877 if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
878 if (in_msg.Type == CoherenceRequestType:GETX) {
879 setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
880 } else if (in_msg.Type == CoherenceRequestType:GETS) {
881 addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
// Drop the "likely exclusive in a local L1" hint for this address.
887 action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
888 clearExclusiveBitIfExists(address);
// Touch the replacement state: mark the line MRU when a local L1 request
// hits a present tag (non-L1 requestors don't affect recency).
891 action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
892 peek(L1requestNetwork_in, RequestMsg) {
893 if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
894 (isCacheTagPresent(address))) {
895 L2cacheMemory.setMRU(address);
// Give all collected tokens (no data) to the requestor of a transient GETX
// from the global network; no-op when we hold none.
900 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
901 if (getL2CacheEntry(address).Tokens > 0) {
902 peek(requestNetwork_in, RequestMsg) {
903 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
904 out_msg.Address := address;
905 out_msg.Type := CoherenceResponseType:ACK;
906 out_msg.Sender := machineID;
907 out_msg.SenderMachine := MachineType:L2Cache;
908 out_msg.Destination.add(in_msg.Requestor);
909 assert(getL2CacheEntry(address).Tokens >= 1);
910 out_msg.Tokens := getL2CacheEntry(address).Tokens;
911 out_msg.MessageSize := MessageSizeType:Response_Control;
915 getL2CacheEntry(address).Tokens := 0;
// Same as t_, but the requestor comes from the LOCAL L1 request queue.
918 action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
919 if (getL2CacheEntry(address).Tokens > 0) {
920 peek(L1requestNetwork_in, RequestMsg) {
921 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
922 out_msg.Address := address;
923 out_msg.Type := CoherenceResponseType:ACK;
924 out_msg.Sender := machineID;
925 out_msg.SenderMachine := MachineType:L2Cache;
926 out_msg.Destination.add(in_msg.Requestor);
927 assert(getL2CacheEntry(address).Tokens >= 1);
928 out_msg.Tokens := getL2CacheEntry(address).Tokens;
929 out_msg.MessageSize := MessageSizeType:Response_Control;
933 getL2CacheEntry(address).Tokens := 0;
// Copy the data block from an incoming response into the cache entry.
// Dirty is sticky: it is set when a clean entry receives dirty data,
// and never cleared here -- a clean response must not launder an
// already-dirty line.
936 action(u_writeDataToCache, "u", desc="Write data to cache") {
937 peek(responseNetwork_in, ResponseMsg) {
938 getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
939 if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
940 getL2CacheEntry(address).Dirty := in_msg.Dirty;
// Allocate a fresh (default-initialized) entry for this address in the
// L2 cache array.
945 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
946 L2cacheMemory.allocate(address, new Entry);
// Free the cache entry so the frame can be refilled; per the desc, this
// lets a replacement proceed in parallel with the fetch of the new line.
949 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
950 L2cacheMemory.deallocate(address);
953 //action(uu_profileMiss, "\u", desc="Profile the demand miss") {
954 // peek(L1requestNetwork_in, RequestMsg) {
955 // AccessModeType not implemented
956 //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
// Sanity check used on transitions where we already hold valid data:
// any data arriving for the line must match our cached copy exactly.
961 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
962 peek(responseNetwork_in, ResponseMsg) {
963 assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
968 //*****************************************************
970 //*****************************************************
972 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
974 h_updateFilterFromL1HintOrWB;
978 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
979 l_popPersistentQueue;
983 // Transitions from NP
// Not present: nothing cached, so a remote transient request is only
// relayed to whatever local sharers the filter still records.
985 transition(NP, {Transient_GETX, Transient_GETS}) {
986 // forward message to local sharers
988 j_forwardTransientRequestToLocalSharers;
// Local L1 miss with nothing cached here: broadcast the request.
993 transition(NP, {L1_GETS, L1_GETX}) {
994 a_broadcastLocalRequest;
// Stray acks/data for a line we no longer track.
1000 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
// L1 writebacks allocate a fresh entry; the next state reflects what
// arrived: shared data -> S, bare tokens -> I, all tokens (with data)
// -> M, owner data -> O.  Each also updates the sharing filter.
1005 transition(NP, Writeback_Shared_Data, S) {
1006 vv_allocateL2CacheBlock;
1008 q_updateTokensFromResponse;
1009 h_updateFilterFromL1HintOrWB;
1013 transition(NP, Writeback_Tokens, I) {
1014 vv_allocateL2CacheBlock;
1015 q_updateTokensFromResponse;
1016 h_updateFilterFromL1HintOrWB;
1020 transition(NP, Writeback_All_Tokens, M) {
1021 vv_allocateL2CacheBlock;
1023 q_updateTokensFromResponse;
1024 h_updateFilterFromL1HintOrWB;
1028 transition(NP, Writeback_Owned, O) {
1029 vv_allocateL2CacheBlock;
1031 q_updateTokensFromResponse;
1032 h_updateFilterFromL1HintOrWB;
// A persistent (starvation-avoidance) request locks the uncached line.
1037 transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
1038 l_popPersistentQueue;
1041 // Transitions from Idle
// Idle: the bank may hold stray tokens but no valid data.  Local
// requests are broadcast, and any collected tokens are handed to the
// local requestor on their way out.
1043 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1044 a_broadcastLocalRequest;
1045 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1048 o_popL1RequestQueue;
1051 transition(I, L1_GETX) {
1052 a_broadcastLocalRequest;
1053 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1056 o_popL1RequestQueue;
1059 transition(I, L2_Replacement) {
1060 c_cleanReplacement; // Only needed in some cases
1061 rr_deallocateL2CacheBlock;
// Remote transient request: give up our collected tokens and relay the
// request to any local sharers.
1064 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1066 t_sendAckWithCollectedTokens;
1067 j_forwardTransientRequestToLocalSharers;
// Persistent request: surrender tokens and lock the line.
1071 transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
1072 e_sendAckWithCollectedTokens;
1073 l_popPersistentQueue;
1077 transition(I, Ack) {
1078 q_updateTokensFromResponse;
// Arriving data upgrades the state by what it carries:
// shared data -> S, owner data -> O, all tokens -> M.
1082 transition(I, Data_Shared, S) {
1084 q_updateTokensFromResponse;
1088 transition(I, Writeback_Shared_Data, S) {
1090 q_updateTokensFromResponse;
1091 h_updateFilterFromL1HintOrWB;
1095 transition(I, Writeback_Tokens) {
1096 q_updateTokensFromResponse;
1097 h_updateFilterFromL1HintOrWB;
1101 transition(I, Data_Owner, O) {
1103 q_updateTokensFromResponse;
1107 transition(I, Writeback_Owned, O) {
1109 q_updateTokensFromResponse;
1110 h_updateFilterFromL1HintOrWB;
1114 transition(I, Data_All_Tokens, M) {
1116 q_updateTokensFromResponse;
1121 transition(I, Writeback_All_Tokens, M) {
1123 q_updateTokensFromResponse;
1124 h_updateFilterFromL1HintOrWB;
1128 // Transitions from Shared
// Shared: valid data plus some (not all) tokens.
1130 transition(S, L2_Replacement, I) {
1132 rr_deallocateL2CacheBlock;
// A remote GETX takes our tokens; we fall back to I and relay to any
// local sharers.
1135 transition(S, Transient_GETX, I) {
1137 t_sendAckWithCollectedTokens;
1138 j_forwardTransientRequestToLocalSharers;
1142 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1143 j_forwardTransientRequestToLocalSharers;
1148 transition(S, Persistent_GETX, I_L) {
1149 e_sendAckWithCollectedTokens;
1150 l_popPersistentQueue;
// Persistent GETS: keep exactly one token so we may stay a (locked)
// sharer; hence S_L rather than I_L.
1154 transition(S, Persistent_GETS, S_L) {
1155 f_sendAckWithAllButOneTokens;
1156 l_popPersistentQueue;
1160 transition(S, Ack) {
1161 q_updateTokensFromResponse;
// We already hold valid data, so incoming data must match ours --
// asserted before the token count is merged.
1165 transition(S, Data_Shared) {
1166 w_assertIncomingDataAndCacheDataMatch;
1167 q_updateTokensFromResponse;
1171 transition(S, Writeback_Tokens) {
1172 q_updateTokensFromResponse;
1173 h_updateFilterFromL1HintOrWB;
1177 transition(S, Writeback_Shared_Data) {
1178 w_assertIncomingDataAndCacheDataMatch;
1179 q_updateTokensFromResponse;
1180 h_updateFilterFromL1HintOrWB;
// Gaining the owner token promotes S -> O; gaining all tokens -> M.
1185 transition(S, Data_Owner, O) {
1186 w_assertIncomingDataAndCacheDataMatch;
1187 q_updateTokensFromResponse;
1191 transition(S, Writeback_Owned, O) {
1192 w_assertIncomingDataAndCacheDataMatch;
1193 q_updateTokensFromResponse;
1194 h_updateFilterFromL1HintOrWB;
1198 transition(S, Data_All_Tokens, M) {
1199 w_assertIncomingDataAndCacheDataMatch;
1200 q_updateTokensFromResponse;
1204 transition(S, Writeback_All_Tokens, M) {
1205 w_assertIncomingDataAndCacheDataMatch;
1206 q_updateTokensFromResponse;
1207 h_updateFilterFromL1HintOrWB;
// Local GETX: pass our tokens to the requestor and broadcast for the
// rest; this bank drops to I.
1211 transition(S, L1_GETX, I) {
1212 a_broadcastLocalRequest;
1213 tt_sendLocalAckWithCollectedTokens;
1217 o_popL1RequestQueue;
// Local GETS can be serviced entirely from this bank's data.
1221 transition(S, L1_GETS) {
1222 k_dataFromL2CacheToL1Requestor;
1225 o_popL1RequestQueue;
// Handing over our last token means we can no longer stay in S.
1228 transition(S, L1_GETS_Last_Token, I) {
1230 k_dataFromL2CacheToL1Requestor;
1233 o_popL1RequestQueue;
1236 // Transitions from Owned
// Owned: valid (possibly dirty) data plus the owner token; replacement
// must write the dirty line back.
1238 transition(O, L2_Replacement, I) {
1239 cc_dirtyReplacement;
1240 rr_deallocateL2CacheBlock;
// Remote GETX takes the data and every token; relay to local sharers.
1243 transition(O, Transient_GETX, I) {
1245 dd_sendDataWithAllTokens;
1246 j_forwardTransientRequestToLocalSharers;
1250 transition(O, Persistent_GETX, I_L) {
1251 ee_sendDataWithAllTokens;
1252 l_popPersistentQueue;
// Persistent GETS: send data but keep one token, remaining a locked
// sharer (S_L).
1255 transition(O, Persistent_GETS, S_L) {
1256 ff_sendDataWithAllButOneTokens;
1257 l_popPersistentQueue;
1260 transition(O, Transient_GETS) {
1261 // send multiple tokens
1263 d_sendDataWithTokens;
// Deliberately ignored: the owner refuses to give up its last token to
// a transient GETS; a starving requestor must escalate to a persistent
// request instead.
1267 transition(O, Transient_GETS_Last_Token) {
1268 // WAIT FOR IT TO GO PERSISTENT
1273 transition(O, Ack) {
1274 q_updateTokensFromResponse;
// Collecting the final token promotes the owner to M.
1278 transition(O, Ack_All_Tokens, M) {
1279 q_updateTokensFromResponse;
1283 transition(O, Data_Shared) {
1284 w_assertIncomingDataAndCacheDataMatch;
1285 q_updateTokensFromResponse;
1290 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1291 w_assertIncomingDataAndCacheDataMatch;
1292 q_updateTokensFromResponse;
1293 h_updateFilterFromL1HintOrWB;
1297 transition(O, Data_All_Tokens, M) {
1298 w_assertIncomingDataAndCacheDataMatch;
1299 q_updateTokensFromResponse;
1303 transition(O, Writeback_All_Tokens, M) {
1304 w_assertIncomingDataAndCacheDataMatch;
1305 q_updateTokensFromResponse;
1306 h_updateFilterFromL1HintOrWB;
// Local GETS served from our copy; giving up the last token or
// answering a GETX surrenders ownership (-> I).
1310 transition(O, L1_GETS) {
1311 k_dataFromL2CacheToL1Requestor;
1314 o_popL1RequestQueue;
1317 transition(O, L1_GETS_Last_Token, I) {
1318 k_dataOwnerFromL2CacheToL1Requestor;
1321 o_popL1RequestQueue;
1324 transition(O, L1_GETX, I) {
1325 a_broadcastLocalRequest;
1326 k_dataAndAllTokensFromL2CacheToL1Requestor;
1330 o_popL1RequestQueue;
1333 // Transitions from M
// Modified: this bank holds all tokens and (dirty) data.
1335 transition(M, L2_Replacement, I) {
1336 cc_dirtyReplacement;
1337 rr_deallocateL2CacheBlock;
1340 // MRM_DEBUG: Give up all tokens even for GETS? ???
// Any remote request -- even a GETS -- currently takes the data and
// every token (see the open question above), dropping this bank to I.
1341 transition(M, {Transient_GETX, Transient_GETS}, I) {
1343 dd_sendDataWithAllTokens;
1347 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1348 ee_sendDataWithAllTokens;
1349 l_popPersistentQueue;
// Local GETS keeps this bank as owner (-> O); local GETX transfers
// data plus all tokens to the L1 (-> I).
1353 transition(M, L1_GETS, O) {
1354 k_dataFromL2CacheToL1Requestor;
1357 o_popL1RequestQueue;
1360 transition(M, L1_GETX, I) {
1361 k_dataAndAllTokensFromL2CacheToL1Requestor;
1364 o_popL1RequestQueue;
1368 //Transitions from locked states
// I_L / S_L: a persistent request from another node has locked the
// line.  Tokens and data that arrive here are not kept -- they are
// bounced onward to the starving requestor.
1370 transition({I_L, S_L}, Ack) {
1371 gg_bounceResponseToStarver;
1375 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1376 gg_bounceResponseToStarver;
// L1 writebacks are likewise bounced, but still update our local
// sharing filter on the way through.
1380 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1381 gg_bounceWBSharedToStarver;
1382 h_updateFilterFromL1HintOrWB;
1386 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1387 gg_bounceWBOwnedToStarver;
1388 h_updateFilterFromL1HintOrWB;
1392 transition(S_L, L2_Replacement, I) {
1394 rr_deallocateL2CacheBlock;
1397 transition(I_L, L2_Replacement, I) {
1398 rr_deallocateL2CacheBlock;
// Our own lock/unlock releases the locked state: I_L -> I, S_L -> S.
1401 transition(I_L, Own_Lock_or_Unlock, I) {
1402 l_popPersistentQueue;
1405 transition(S_L, Own_Lock_or_Unlock, S) {
1406 l_popPersistentQueue;
// Transient requests are ignored while locked; the persistent request
// mechanism supersedes them.
1409 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
// Local requests may still be broadcast while locked.
1414 transition(I_L, {L1_GETX, L1_GETS}) {
1415 a_broadcastLocalRequest;
1418 o_popL1RequestQueue;
1421 transition(S_L, L1_GETX, I_L) {
1422 a_broadcastLocalRequest;
1423 tt_sendLocalAckWithCollectedTokens;
1427 o_popL1RequestQueue;
// S_L still holds valid data, so local GETS can be serviced directly;
// giving up the last token drops us to I_L.
1430 transition(S_L, L1_GETS) {
1431 k_dataFromL2CacheToL1Requestor;
1434 o_popL1RequestQueue;
1437 transition(S_L, L1_GETS_Last_Token, I_L) {
1438 k_dataFromL2CacheToL1Requestor;
1441 o_popL1RequestQueue;
1444 transition(S_L, Persistent_GETX, I_L) {
1445 e_sendAckWithCollectedTokens;
1446 l_popPersistentQueue;
// Already sharing under a persistent GETS lock: just consume the entry.
1449 transition(S_L, Persistent_GETS) {
1450 l_popPersistentQueue;
1453 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1454 l_popPersistentQueue;