3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// SLICC state machine for one bank of a shared L2 cache in a token-coherence
// ("Token protocol") memory system.  The L2 bank sits between the local L1
// caches (vnet 0/2) and the directory (vnet 1/2); persistent (starvation-
// avoidance) requests arrive on their own ordered vnet 3.
35 machine(L2Cache, "Token protocol") {
38  // From local bank of L2 cache TO the network
39  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false";  // this L2 bank -> a local L1
40  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false";  // this L2 bank -> mod-directory
41  MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false";  // this L2 bank -> a local L1 || mod-directory
44  // FROM the network to this local bank of L2 cache
45  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false";  // a local L1 -> this L2 bank
46  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false";  // mod-directory -> this L2 bank
47  MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false";  // a local L1 || mod-directory -> this L2 bank
// vnet 3 is the only ordered network: persistent (starvation) requests must be
// observed in a consistent order by all nodes.
48  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
// Stable states of a cache line in this L2 bank.  In token coherence the
// state is largely implied by the token count (see setState's assertions):
// NP/I hold zero tokens, S/O hold some, M holds all.
// NOTE(review): this extraction is missing some lines of the enumeration
// (e.g. an I state is referenced elsewhere, and the closing brace) — the
// original file has additional entries between these.
51  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
53    NP, desc="Not Present";
55    S, desc="Shared, not present in any local L1s";
56    O, desc="Owned, not present in any L1s";
57    M, desc="Modified, not present in any L1s";
// "Locked" variants: a persistent request from another node has locked this
// address, restricting what this bank may do with its tokens.
60    I_L, "I^L", desc="Invalid, Locked";
61    S_L, "S^L", desc="Shared, Locked";
// Events that drive the state machine.  The "_Last_Token" variants fire when
// this bank holds exactly one token, so satisfying the GETS would leave it
// with none (handled by a different transition than the common case).
65  enumeration(Event, desc="Cache events") {
68    L1_GETS, desc="local L1 GETS request";
69    L1_GETS_Last_Token, desc="local L1 GETS request";
70    L1_GETX, desc="local L1 GETX request";
71    L1_INV, desc="L1 no longer has tokens";
// "Transient" = an ordinary (non-persistent) request from a remote node.
72    Transient_GETX, desc="A GetX from another processor";
73    Transient_GETS, desc="A GetS from another processor";
74    Transient_GETS_Last_Token, desc="A GetS from another processor";
76    // events initiated by this L2
77    L2_Replacement, desc="L2 Replacement", format="!r";
79    // events of external L2 responses
// Writebacks from local L1s, distinguished by what they carry:
// tokens only, clean shared data, all remaining tokens, or owned (dirty) data.
82    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
83    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
84    Writeback_All_Tokens, desc="Received a writeback from L1";
85    Writeback_Owned, desc="Received a writeback from L1";
88    Data_Shared, desc="Received a data message, we are now a sharer";
89    Data_Owner, desc="Received a data message, we are now the owner";
90    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
91    Ack, desc="Received an ack message";
92    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
// Persistent requests: starvation-avoidance mechanism that locks an address
// until the starving node collects the tokens it needs.
95    Persistent_GETX, desc="Another processor has priority to read/write";
96    Persistent_GETS, desc="Another processor has priority to read";
97    Own_Lock_or_Unlock, desc="This processor now has priority";
// Per-line L2 cache entry.  Tokens is the key token-coherence field: the
// number of tokens this bank currently holds for the line (0..max_tokens()).
103  structure(Entry, desc="...", interface="AbstractCacheEntry") {
104    State CacheState, desc="cache state";
105    bool Dirty, desc="Is the data dirty (different than memory)?";
106    int Tokens, desc="The number of tokens we're holding for the line";
107    DataBlock DataBlk, desc="data for the block";
// Entry of the local (per-bank) sharer directory used to filter broadcasts:
// tracks which local L1s are believed to hold the line, and whether one of
// them likely holds it exclusively.  This is a hint, not ground truth
// (see "if local exclusive is likely").
112  structure(DirEntry, desc="...") {
113    Set Sharers, desc="Set of the internal processors that want the block in shared state";
114    bool exclusive, default="false", desc="if local exclusive is likely";
// Interfaces to C++ objects implemented outside SLICC.
// CacheMemory: a real set-associative cache (cacheProbe returns the victim
// address that would be evicted to make room for the argument).
117  external_type(CacheMemory) {
118    bool cacheAvail(Address);
119    Address cacheProbe(Address);
120    void allocate(Address);
121    void deallocate(Address);
122    Entry lookup(Address);
123    void changePermission(Address, AccessPermission);
124    bool isTagPresent(Address);
125    void setMRU(Address);
// PerfectCacheMemory: unbounded tag store (no evictions) backing the local
// sharer directory.
128  external_type(PerfectCacheMemory) {
129    void allocate(Address);
130    void deallocate(Address);
131    DirEntry lookup(Address);
132    bool isTagPresent(Address);
// Per-bank storage: the L2 data/tag array, the persistent-request table
// (shared starvation-arbitration state), and the local L1 sharer directory.
136  CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)+"_L2"';
138  PersistentTable persistentTable, constructor_hack="i";
139  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
// Runtime switch for the broadcast-filtering optimization (defined externally).
142  bool getFilteringEnabled();
// Return a reference to the cache entry for addr.
// NOTE(review): the extraction dropped the else-branch/closing lines; the
// original presumably handles the not-present case (e.g. error or assert).
144  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
145    if (L2cacheMemory.isTagPresent(addr)) {
146      return L2cacheMemory[addr];
// Token count for addr; the dropped else-branch presumably returns 0 when the
// tag is absent (callers such as responseNetwork_in rely on that).
150  int getTokens(Address addr) {
151    if (L2cacheMemory.isTagPresent(addr)) {
152      return L2cacheMemory[addr].Tokens;
// Forward a permission change to the cache, only if the line is present.
158  void changePermission(Address addr, AccessPermission permission) {
159    if (L2cacheMemory.isTagPresent(addr)) {
160      return L2cacheMemory.changePermission(addr, permission);
164  bool isCacheTagPresent(Address addr) {
165    return (L2cacheMemory.isTagPresent(addr) );
// Current protocol state: from the cache entry if present, otherwise derived
// from the persistent lock table (lines for the locked/NP cases were dropped
// by the extraction).
168  State getState(Address addr) {
169    if (isCacheTagPresent(addr)) {
170      return getL2CacheEntry(addr).CacheState;
171    } else if (persistentTable.isLocked(addr) == true) {
178  string getStateStr(Address addr) {
179    return L2Cache_State_to_string(getState(addr));
// Set the protocol state of addr and derive the access permission from it.
// The assertions encode the token-coherence invariants that tie state to
// token count (zero in I_L/NP, all in M, >0 in S, >=1 in O).
182  void setState(Address addr, State state) {
185    if (isCacheTagPresent(addr)) {
186      // Make sure the token count is in range
187      assert(getL2CacheEntry(addr).Tokens >= 0);
188      assert(getL2CacheEntry(addr).Tokens <= max_tokens());
190      // Make sure we have no tokens in L
191      if ((state == State:I_L) ) {
192        if (isCacheTagPresent(addr)) {
193          assert(getL2CacheEntry(addr).Tokens == 0);
197      // in M and E you have all the tokens
198      if (state == State:M ) {
199        assert(getL2CacheEntry(addr).Tokens == max_tokens());
202      // in NP you have no tokens
203      if (state == State:NP) {
204        assert(getL2CacheEntry(addr).Tokens == 0);
207      // You have at least one token in S-like states
208      if (state == State:S ) {
209        assert(getL2CacheEntry(addr).Tokens > 0);
212      // You have at least half the token in O-like states
213      if (state == State:O ) {
214        assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
215        // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
218      getL2CacheEntry(addr).CacheState := state;
// Map protocol state to Ruby access permission; any state not matched below
// (locked states, etc.) falls through to Invalid.
221      if (state == State:I) {
222        changePermission(addr, AccessPermission:Invalid);
223      } else if (state == State:S || state == State:O ) {
224        changePermission(addr, AccessPermission:Read_Only);
225      } else if (state == State:M ) {
226        changePermission(addr, AccessPermission:Read_Write);
228        changePermission(addr, AccessPermission:Invalid);
// ----- Local sharer-directory bookkeeping (broadcast filter) -----
// Drop id from the sharer set; free the directory entry once nobody shares.
233  void removeSharer(Address addr, NodeID id) {
235    if (localDirectory.isTagPresent(addr)) {
236      localDirectory[addr].Sharers.remove(id);
237      if (localDirectory[addr].Sharers.count() == 0) {
238        localDirectory.deallocate(addr);
// True if any local L1 is recorded as a sharer (dropped lines presumably
// return false in the other branches).
243  bool sharersExist(Address addr) {
244    if (localDirectory.isTagPresent(addr)) {
245      if (localDirectory[addr].Sharers.count() > 0) {
// True if a local L1 likely holds the line exclusively.
257  bool exclusiveExists(Address addr) {
258    if (localDirectory.isTagPresent(addr)) {
259      if (localDirectory[addr].exclusive == true) {
271  // assumes that caller will check to make sure tag is present
272  Set getSharers(Address addr) {
273    return localDirectory[addr].Sharers;
// Record a local GETX requestor: it becomes the sole (exclusive) sharer.
276  void setNewWriter(Address addr, NodeID id) {
277    if (localDirectory.isTagPresent(addr) == false) {
278      localDirectory.allocate(addr);
280    localDirectory[addr].Sharers.clear();
281    localDirectory[addr].Sharers.add(id);
282    localDirectory[addr].exclusive := true;
// Record a local GETS requestor as an additional sharer.  Note the exclusive
// bit is deliberately NOT cleared here (see the commented-out line); that is
// done separately by clearExclusiveBitIfExists/r_clearExclusive.
285  void addNewSharer(Address addr, NodeID id) {
286    if (localDirectory.isTagPresent(addr) == false) {
287      localDirectory.allocate(addr);
289    localDirectory[addr].Sharers.add(id);
290    // localDirectory[addr].exclusive := false;
293  void clearExclusiveBitIfExists(Address addr) {
294    if (localDirectory.isTagPresent(addr) == true) {
295      localDirectory[addr].exclusive := false;
// Outgoing ports: requests to the global network / local L1s, and responses
// (data, acks, bounced writebacks) to either.
300  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
301  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
302  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
// Persistent (starvation-avoidance) network.  First update the persistent
// table with the lock/unlock, then trigger an event based on the table's
// resulting state: if the address is locked by someone, the smallest
// (highest-priority) requestor's access type decides GETS vs GETX; otherwise
// this node owns the lock (or the address is now unlocked).
308  // Persistent Network
309  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
310    if (persistentNetwork_in.isReady()) {
311      peek(persistentNetwork_in, PersistentMsg) {
312        assert(in_msg.Destination.isElement(machineID));
314        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
315          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
316        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
317          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
318        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
319          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
321          error("Unexpected message");
324        // React to the message based on the current state of the table
325        if (persistentTable.isLocked(in_msg.Address)) {
327          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
328            trigger(Event:Persistent_GETS, in_msg.Address);
330            trigger(Event:Persistent_GETX, in_msg.Address);
334          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
// Transient requests from remote nodes (via the global network).  A GETS that
// would take our single remaining token is a distinct event because the bank
// must hand over data/ownership rather than just a spare token.
342  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
343    if (requestNetwork_in.isReady()) {
344      peek(requestNetwork_in, RequestMsg) {
345        assert(in_msg.Destination.isElement(machineID));
347        if (in_msg.Type == CoherenceRequestType:GETX) {
348          trigger(Event:Transient_GETX, in_msg.Address);
349        } else if (in_msg.Type == CoherenceRequestType:GETS) {
350          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
351            trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
354            trigger(Event:Transient_GETS, in_msg.Address);
357            error("Unexpected message");
// Requests from this bank's local L1 caches; same last-token distinction.
363  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
364    if (L1requestNetwork_in.isReady()) {
365      peek(L1requestNetwork_in, RequestMsg) {
366        assert(in_msg.Destination.isElement(machineID));
367        if (in_msg.Type == CoherenceRequestType:GETX) {
368          trigger(Event:L1_GETX, in_msg.Address);
369        } else if (in_msg.Type == CoherenceRequestType:GETS) {
370          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
371            trigger(Event:L1_GETS_Last_Token, in_msg.Address);
374            trigger(Event:L1_GETS, in_msg.Address);
377            error("Unexpected message");
// Responses (data, acks, L1 writebacks, invalidation hints).  The top-level
// split is on whether this message would bring the bank to ALL tokens
// (getTokens + in_msg.Tokens == max_tokens()): the "all tokens" branch maps
// every message type to its *_All_Tokens event.  Writebacks additionally
// require cache space: if neither room nor the tag is available, an
// L2_Replacement is triggered on the victim chosen by cacheProbe instead.
385  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
386    if (responseNetwork_in.isReady()) {
387      peek(responseNetwork_in, ResponseMsg) {
388        assert(in_msg.Destination.isElement(machineID));
// Not-all-tokens case:
389        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
390          if (in_msg.Type == CoherenceResponseType:ACK) {
391            trigger(Event:Ack, in_msg.Address);
392          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
393            trigger(Event:Data_Owner, in_msg.Address);
394          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
395            trigger(Event:Data_Shared, in_msg.Address);
396          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
398            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
400              // either room is available or the block is already present
402              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
403                assert(in_msg.Dirty == false);
404                trigger(Event:Writeback_Tokens, in_msg.Address);
405              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
406                assert(in_msg.Dirty == false);
407                trigger(Event:Writeback_Shared_Data, in_msg.Address);
409              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
410                //assert(in_msg.Dirty == false);
411                trigger(Event:Writeback_Owned, in_msg.Address);
// No room: replace the probe victim first (the writeback stays queued).
415              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
417          } else if (in_msg.Type == CoherenceResponseType:INV) {
418            trigger(Event:L1_INV, in_msg.Address);
420            error("Unexpected message");
// All-tokens case: every writeback flavor collapses to Writeback_All_Tokens,
// data of either kind becomes Data_All_Tokens.
423          if (in_msg.Type == CoherenceResponseType:ACK) {
424            trigger(Event:Ack_All_Tokens, in_msg.Address);
425          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
426            trigger(Event:Data_All_Tokens, in_msg.Address);
427          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
428            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
430              // either room is available or the block is already present
432              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
433                assert(in_msg.Dirty == false);
// A tokens-only writeback can only complete the set if we already hold data.
434                assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
435                trigger(Event:Writeback_All_Tokens, in_msg.Address);
436              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
437                assert(in_msg.Dirty == false);
438                trigger(Event:Writeback_All_Tokens, in_msg.Address);
440              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
441                trigger(Event:Writeback_All_Tokens, in_msg.Address);
445              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
447          } else if (in_msg.Type == CoherenceResponseType:INV) {
448            trigger(Event:L1_INV, in_msg.Address);
450            DEBUG_EXPR(in_msg.Type);
451            error("Unexpected message");
// Re-broadcast a local L1 request to all pertinent L2 banks plus the
// directory, excluding this requestor's own L2 bank.  A commented-out filter
// condition (retry count / local-sharer checks) shows where selective
// broadcast suppression was once applied.
461  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
463    peek(L1requestNetwork_in, RequestMsg) {
465      // if this is a retry or no local sharers, broadcast normally
467      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
468      enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
469        out_msg.Address := in_msg.Address;
470        out_msg.Type := in_msg.Type;
471        out_msg.Requestor := in_msg.Requestor;
472        out_msg.RequestorMachine := in_msg.RequestorMachine;
473        //out_msg.Destination.broadcast(MachineType:L2Cache);
474        out_msg.RetryNum := in_msg.RetryNum;
// Target set = all relevant L2 banks + directory, minus the requestor's bank.
475        out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
476        out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
477        out_msg.Destination.add(map_Address_to_Directory(address));
478        out_msg.MessageSize := MessageSizeType:Request_Control;
479        out_msg.AccessMode := in_msg.AccessMode;
480        out_msg.Prefetch := in_msg.Prefetch;
484    //profile_filter_action(0);
// Forward an incoming response untouched to the directory/memory (used when
// this bank cannot keep the tokens/data, e.g. while not caching the line).
489  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
490    peek(responseNetwork_in, ResponseMsg) {
491      // FIXME, should use a 3rd vnet
492      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
493        out_msg.Address := address;
494        out_msg.Type := in_msg.Type;
495        out_msg.Sender := machineID;
496        out_msg.SenderMachine := MachineType:L2Cache;
497        out_msg.Destination.add(map_Address_to_Directory(address));
498        out_msg.Tokens := in_msg.Tokens;
499        out_msg.MessageSize := in_msg.MessageSize;
500        out_msg.DataBlk := in_msg.DataBlk;
501        out_msg.Dirty := in_msg.Dirty;
// Clean replacement: return any held tokens to the directory as a plain ACK
// (no data needed — the line is clean), then zero our token count.
506  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
507    if (getL2CacheEntry(address).Tokens > 0) {
508      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
509        out_msg.Address := address;
510        out_msg.Type := CoherenceResponseType:ACK;
511        out_msg.Sender := machineID;
512        out_msg.SenderMachine := MachineType:L2Cache;
513        out_msg.Destination.add(map_Address_to_Directory(address));
514        out_msg.Tokens := getL2CacheEntry(address).Tokens;
515        out_msg.MessageSize := MessageSizeType:Writeback_Control;
517      getL2CacheEntry(address).Tokens := 0;
// Owner replacement: send tokens plus data (DATA_OWNER) if dirty, or an
// ownership-transferring ack (ACK_OWNER, no data payload) if clean.
521  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
522    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
523      out_msg.Address := address;
524      out_msg.Sender := machineID;
525      out_msg.SenderMachine := MachineType:L2Cache;
526      out_msg.Destination.add(map_Address_to_Directory(address));
527      out_msg.Tokens := getL2CacheEntry(address).Tokens;
528      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
529      out_msg.Dirty := getL2CacheEntry(address).Dirty;
531      if (getL2CacheEntry(address).Dirty) {
532        out_msg.MessageSize := MessageSizeType:Writeback_Data;
533        out_msg.Type := CoherenceResponseType:DATA_OWNER;
535        out_msg.MessageSize := MessageSizeType:Writeback_Control;
536        out_msg.Type := CoherenceResponseType:ACK_OWNER;
539    getL2CacheEntry(address).Tokens := 0;
// Respond to a transient GETS: send clean shared data with either N_tokens()
// tokens (if we hold more than that) or a single token.
// NOTE(review): in the else-branch the extraction shows no out_msg.Tokens
// assignment (original line 565 is missing) — presumably it sets Tokens := 1
// to match the "- 1" decrement below; confirm against the original file.
542  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
543    peek(requestNetwork_in, RequestMsg) {
544      if (getL2CacheEntry(address).Tokens > N_tokens()) {
545        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
546          out_msg.Address := address;
547          out_msg.Type := CoherenceResponseType:DATA_SHARED;
548          out_msg.Sender := machineID;
549          out_msg.SenderMachine := MachineType:L2Cache;
550          out_msg.Destination.add(in_msg.Requestor);
551          out_msg.Tokens := N_tokens();
552          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
// Shared data is clean by definition.
553          out_msg.Dirty := false;
554          out_msg.MessageSize := MessageSizeType:Response_Data;
556        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens();
559        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
560          out_msg.Address := address;
561          out_msg.Type := CoherenceResponseType:DATA_SHARED;
562          out_msg.Sender := machineID;
563          out_msg.SenderMachine := MachineType:L2Cache;
564          out_msg.Destination.add(in_msg.Requestor);
566          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
567          out_msg.Dirty := false;
568          out_msg.MessageSize := MessageSizeType:Response_Data;
570        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
// Respond to a transient GETX: transfer ownership — data, dirty bit, and
// every token we hold.
575  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
576    peek(requestNetwork_in, RequestMsg) {
577      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
578        out_msg.Address := address;
579        out_msg.Type := CoherenceResponseType:DATA_OWNER;
580        out_msg.Sender := machineID;
581        out_msg.SenderMachine := MachineType:L2Cache;
582        out_msg.Destination.add(in_msg.Requestor);
583        assert(getL2CacheEntry(address).Tokens >= 1);
584        out_msg.Tokens := getL2CacheEntry(address).Tokens;
585        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
586        out_msg.Dirty := getL2CacheEntry(address).Dirty;
587        out_msg.MessageSize := MessageSizeType:Response_Data;
590    getL2CacheEntry(address).Tokens := 0;
// Persistent-request servicing (non-owner): yield all collected tokens to the
// starving node (persistentTable.findSmallest = highest-priority requestor)
// as a control-only ACK.
593  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
594    if (getL2CacheEntry(address).Tokens > 0) {
595      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
596        out_msg.Address := address;
597        out_msg.Type := CoherenceResponseType:ACK;
598        out_msg.Sender := machineID;
599        out_msg.SenderMachine := MachineType:L2Cache;
600        out_msg.Destination.add(persistentTable.findSmallest(address));
601        assert(getL2CacheEntry(address).Tokens >= 1);
602        out_msg.Tokens := getL2CacheEntry(address).Tokens;
603        out_msg.MessageSize := MessageSizeType:Response_Control;
606    getL2CacheEntry(address).Tokens := 0;
// Persistent-request servicing (owner): yield ownership — data, dirty bit,
// and all tokens — to the starving node.
609  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
610    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
611      out_msg.Address := address;
612      out_msg.Type := CoherenceResponseType:DATA_OWNER;
613      out_msg.Sender := machineID;
614      out_msg.SenderMachine := MachineType:L2Cache;
615      out_msg.Destination.add(persistentTable.findSmallest(address));
616      assert(getL2CacheEntry(address).Tokens >= 1);
617      out_msg.Tokens := getL2CacheEntry(address).Tokens;
618      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
619      out_msg.Dirty := getL2CacheEntry(address).Dirty;
620      out_msg.MessageSize := MessageSizeType:Response_Data;
622    getL2CacheEntry(address).Tokens := 0;
// Persistent GETS servicing: give the starver everything but ONE token, so
// this bank can stay a sharer.  If we hold only one token, nothing is sent
// and the final count stays 1 either way.
625  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
626    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
627    assert(getL2CacheEntry(address).Tokens > 0);
628    if (getL2CacheEntry(address).Tokens > 1) {
629      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
630        out_msg.Address := address;
631        out_msg.Type := CoherenceResponseType:ACK;
632        out_msg.Sender := machineID;
633        out_msg.SenderMachine := MachineType:L2Cache;
634        out_msg.Destination.add(persistentTable.findSmallest(address));
635        assert(getL2CacheEntry(address).Tokens >= 1);
636        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
637        out_msg.MessageSize := MessageSizeType:Response_Control;
640    getL2CacheEntry(address).Tokens := 1;
// Same as f_ but from the owner: ownership (data + dirty) transfers with the
// tokens; we retain a single token as a sharer.
643  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
644    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
645    assert(getL2CacheEntry(address).Tokens > 0);
646    if (getL2CacheEntry(address).Tokens > 1) {
647      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
648        out_msg.Address := address;
649        out_msg.Type := CoherenceResponseType:DATA_OWNER;
650        out_msg.Sender := machineID;
651        out_msg.SenderMachine := MachineType:L2Cache;
652        out_msg.Destination.add(persistentTable.findSmallest(address));
653        assert(getL2CacheEntry(address).Tokens >= 1);
654        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
655        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
656        out_msg.Dirty := getL2CacheEntry(address).Dirty;
657        out_msg.MessageSize := MessageSizeType:Response_Data;
659    getL2CacheEntry(address).Tokens := 1;
// While an address is locked by a persistent request, responses that arrive
// here are redirected ("bounced") to the starving node instead of being kept.
// Generic bounce: forward the response as-is to the starver.
665  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
666    // assert(persistentTable.isLocked(address));
667    peek(responseNetwork_in, ResponseMsg) {
668      // FIXME, should use a 3rd vnet in some cases
669      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
670        out_msg.Address := address;
671        out_msg.Type := in_msg.Type;
672        out_msg.Sender := machineID;
673        out_msg.SenderMachine := MachineType:L2Cache;
674        out_msg.Destination.add(persistentTable.findSmallest(address));
675        out_msg.Tokens := in_msg.Tokens;
676        out_msg.DataBlk := in_msg.DataBlk;
677        out_msg.Dirty := in_msg.Dirty;
678        out_msg.MessageSize := in_msg.MessageSize;
// Bounce an L1 writeback of shared (clean) data: re-encode WB_SHARED_DATA as
// DATA_SHARED (tokens-only writebacks become plain ACKs).
683  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
684    //assert(persistentTable.isLocked(address));
685    peek(responseNetwork_in, ResponseMsg) {
686      // FIXME, should use a 3rd vnet in some cases
687      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
688        out_msg.Address := address;
689        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
690          out_msg.Type := CoherenceResponseType:DATA_SHARED;
692          out_msg.Type := CoherenceResponseType:ACK;
694        out_msg.Sender := machineID;
695        out_msg.SenderMachine := MachineType:L2Cache;
696        out_msg.Destination.add(persistentTable.findSmallest(address));
697        out_msg.Tokens := in_msg.Tokens;
698        out_msg.DataBlk := in_msg.DataBlk;
699        out_msg.Dirty := in_msg.Dirty;
700        out_msg.MessageSize := in_msg.MessageSize;
// Bounce an L1 owned writeback: re-encode as DATA_OWNER so ownership passes
// straight through to the starver.
705  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
706    // assert(persistentTable.isLocked(address));
707    peek(responseNetwork_in, ResponseMsg) {
708      // FIXME, should use a 3rd vnet in some cases
709      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
710        out_msg.Address := address;
711        out_msg.Type := CoherenceResponseType:DATA_OWNER;
712        out_msg.Sender := machineID;
713        out_msg.SenderMachine := MachineType:L2Cache;
714        out_msg.Destination.add(persistentTable.findSmallest(address));
715        out_msg.Tokens := in_msg.Tokens;
716        out_msg.DataBlk := in_msg.DataBlk;
717        out_msg.Dirty := in_msg.Dirty;
718        out_msg.MessageSize := in_msg.MessageSize;
// A writeback (or hint) from an L1 means it no longer holds the line: remove
// it from the local sharer directory.
724  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
725    peek(responseNetwork_in, ResponseMsg) {
726      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
// Forward a remote transient request down to our local L1s — unless the
// filter is enabled, this is a first attempt (RetryNum == 0), and we know no
// local L1 shares the line, in which case the forward is suppressed and the
// filtering is profiled.  Retries are never filtered (safety-net for a stale
// directory hint).
730  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
731    peek(requestNetwork_in, RequestMsg) {
732      if (getFilteringEnabled() == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
733        profile_filter_action(1);
734        DEBUG_EXPR("filtered message");
735        DEBUG_EXPR(in_msg.RetryNum);
738        enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
739          out_msg.Address := in_msg.Address;
740          out_msg.Requestor := in_msg.Requestor;
741          out_msg.RequestorMachine := in_msg.RequestorMachine;
742          out_msg.Destination := getLocalL1IDs(machineID);
743          out_msg.Type := in_msg.Type;
744          out_msg.isLocal := false;
745          out_msg.MessageSize := MessageSizeType:Request_Control;
746          out_msg.AccessMode := in_msg.AccessMode;
747          out_msg.Prefetch := in_msg.Prefetch;
749        profile_filter_action(0);
// L2-hit responses to local L1 requests (note ResponseL2hit_Data size type).
// k: satisfy an L1 GETS with clean shared data and one token.
// NOTE(review): no out_msg.Tokens assignment is visible in this action — the
// corresponding line appears to have been dropped by the extraction
// (the "- 1" decrement implies one token is sent); confirm against original.
755  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
756    peek(L1requestNetwork_in, RequestMsg) {
757      assert(getL2CacheEntry(address).Tokens > 0);
758      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
759      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
760        out_msg.Address := address;
761        out_msg.Type := CoherenceResponseType:DATA_SHARED;
762        out_msg.Sender := machineID;
763        out_msg.SenderMachine := MachineType:L2Cache;
764        out_msg.Destination.add(in_msg.Requestor);
765        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
766        out_msg.Dirty := false;
767        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
770      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
// \k: like k but transfers ownership (DATA_OWNER, real dirty bit) with the
// one token — used when this was our last token (see *_Last_Token events).
774  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
775    peek(L1requestNetwork_in, RequestMsg) {
776      assert(getL2CacheEntry(address).Tokens > 0);
777      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
778        out_msg.Address := address;
779        out_msg.Type := CoherenceResponseType:DATA_OWNER;
780        out_msg.Sender := machineID;
781        out_msg.SenderMachine := MachineType:L2Cache;
782        out_msg.Destination.add(in_msg.Requestor);
783        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
784        out_msg.Dirty := getL2CacheEntry(address).Dirty;
785        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
788      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
// \kk: satisfy an L1 GETX with ownership and ALL of our tokens.
792  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
793    peek(L1requestNetwork_in, RequestMsg) {
794      // assert(getL2CacheEntry(address).Tokens == max_tokens());
795      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
796      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
797        out_msg.Address := address;
798        out_msg.Type := CoherenceResponseType:DATA_OWNER;
799        out_msg.Sender := machineID;
800        out_msg.SenderMachine := MachineType:L2Cache;
801        out_msg.Destination.add(in_msg.Requestor);
802        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
803        out_msg.Dirty := getL2CacheEntry(address).Dirty;
804        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
805        //out_msg.Tokens := max_tokens();
806        out_msg.Tokens := getL2CacheEntry(address).Tokens;
808      getL2CacheEntry(address).Tokens := 0;
// Queue-pop actions: each dequeues one message from its input port after the
// transition has consumed it.
812  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
813    persistentNetwork_in.dequeue();
816  action(m_popRequestQueue, "m", desc="Pop request queue.") {
817    requestNetwork_in.dequeue();
820  action(n_popResponseQueue, "n", desc="Pop response queue") {
821    responseNetwork_in.dequeue();
824  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
825    L1requestNetwork_in.dequeue();
// Absorb the tokens carried by an incoming response into our count.  Also
// latches the dirty bit from owner-transferring messages, since
// Writeback_All_Tokens transitions may skip u_writeDataToCache.
829  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
830    peek(responseNetwork_in, ResponseMsg) {
831      assert(in_msg.Tokens != 0);
832      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
834      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
835      // may not trigger this action.
836      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
837        getL2CacheEntry(address).Dirty := true;
// Update the local sharer directory from an L1 request: GETX makes the
// requestor the exclusive writer, GETS adds it as a sharer.
842  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
844    peek(L1requestNetwork_in, RequestMsg) {
845      if (in_msg.Type == CoherenceRequestType:GETX) {
846        setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
847      } else if (in_msg.Type == CoherenceRequestType:GETS) {
848        addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
853  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
854    clearExclusiveBitIfExists(address);
// Touch the line in the replacement policy on an L2 hit.
857  action( r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
858    if(isCacheTagPresent(address)) {
859      L2cacheMemory.setMRU(address);
// Hand all collected tokens to a REMOTE transient requestor as a control ACK
// (no data — we are not the owner), then zero our count.
863  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
864    if (getL2CacheEntry(address).Tokens > 0) {
865      peek(requestNetwork_in, RequestMsg) {
866        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
867          out_msg.Address := address;
868          out_msg.Type := CoherenceResponseType:ACK;
869          out_msg.Sender := machineID;
870          out_msg.SenderMachine := MachineType:L2Cache;
871          out_msg.Destination.add(in_msg.Requestor);
872          assert(getL2CacheEntry(address).Tokens >= 1);
873          out_msg.Tokens := getL2CacheEntry(address).Tokens;
874          out_msg.MessageSize := MessageSizeType:Response_Control;
878    getL2CacheEntry(address).Tokens := 0;
// Same, but the requestor is a LOCAL L1 (peeks the L1 request port instead).
881  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
882    if (getL2CacheEntry(address).Tokens > 0) {
883      peek(L1requestNetwork_in, RequestMsg) {
884        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
885          out_msg.Address := address;
886          out_msg.Type := CoherenceResponseType:ACK;
887          out_msg.Sender := machineID;
888          out_msg.SenderMachine := MachineType:L2Cache;
889          out_msg.Destination.add(in_msg.Requestor);
890          assert(getL2CacheEntry(address).Tokens >= 1);
891          out_msg.Tokens := getL2CacheEntry(address).Tokens;
892          out_msg.MessageSize := MessageSizeType:Response_Control;
896    getL2CacheEntry(address).Tokens := 0;
899 action(u_writeDataToCache, "u", desc="Write data to cache") {
900 peek(responseNetwork_in, ResponseMsg) {
901 getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
902 if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
903 getL2CacheEntry(address).Dirty := in_msg.Dirty;
// Allocate a cache entry (tag + data storage) for this address in the
// local L2 bank. Callers must ensure a way is available (e.g. via a
// prior replacement).
908 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
909 L2cacheMemory.allocate(address);
// Free the cache entry for this address, marking the block not-present
// so a replacement can proceed in parallel with the fetch of its
// successor.
912 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
913 L2cacheMemory.deallocate(address);
// Placeholder for demand-miss profiling. The actual profiling call is
// commented out because AccessModeType is not implemented; the peek is
// kept so the request message is in scope if profiling is re-enabled.
916 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
917 peek(L1requestNetwork_in, RequestMsg) {
918 // AccessModeType not implemented
919 //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
// Sanity check: data arriving on the response network for a block we
// already hold must be byte-identical to our cached copy.
924 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
925 peek(responseNetwork_in, ResponseMsg) {
926 assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
// Stall: presumably an empty action body, so the triggering message is
// not consumed and the transition is retried later — confirm against
// the full file.
930 action(z_stall, "z", desc="Stall") {
936 //*****************************************************
// TRANSITIONS
938 //*****************************************************
// In every non-transient state, an L1 invalidation hint just updates
// the local sharer filter (the L1 no longer caches the block).
940 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
942 h_updateFilterFromL1HintOrWB;
// Our own persistent lock/unlock activating or deactivating while we
// are not in a locked state: simply consume the persistent message.
946 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
947 l_popPersistentQueue;
951 // Transitions from NP
// Not-present: this bank holds neither tokens nor data for the block.
// Remote transient request: nothing to contribute here, so only pass
// the request on to any local L1 sharers recorded in the filter.
953 transition(NP, {Transient_GETX, Transient_GETS}) {
954 // forward message to local sharers
956 j_forwardTransientRequestToLocalSharers;
// Local L1 miss with nothing cached: broadcast the request outward on
// the L1's behalf.
961 transition(NP, {L1_GETS, L1_GETX}) {
962 a_broadcastLocalRequest;
// Responses/tokens arriving while not present (no entry to absorb
// them into).
968 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
// L1 writebacks while not present: allocate a fresh entry, absorb the
// tokens from the writeback, and record the writer in the sharer
// filter. The final state reflects what arrived: shared data -> S,
// bare tokens -> I, all tokens -> M, owner token -> O.
973 transition(NP, Writeback_Shared_Data, S) {
974 vv_allocateL2CacheBlock;
976 q_updateTokensFromResponse;
977 h_updateFilterFromL1HintOrWB;
981 transition(NP, Writeback_Tokens, I) {
982 vv_allocateL2CacheBlock;
983 q_updateTokensFromResponse;
984 h_updateFilterFromL1HintOrWB;
988 transition(NP, Writeback_All_Tokens, M) {
989 vv_allocateL2CacheBlock;
991 q_updateTokensFromResponse;
992 h_updateFilterFromL1HintOrWB;
996 transition(NP, Writeback_Owned, O) {
997 vv_allocateL2CacheBlock;
999 q_updateTokensFromResponse;
1000 h_updateFilterFromL1HintOrWB;
// A persistent (starvation-avoidance) request locks the block: move to
// the locked-invalid state I_L.
1005 transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
1006 l_popPersistentQueue;
1009 // Transitions from Idle
// Idle: entry allocated, possibly holding tokens but no readable data.
// Local L1 read/write miss: forward any tokens we have collected to
// the requesting L1 and broadcast its request outward.
1011 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1012 a_broadcastLocalRequest;
1013 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1016 o_popL1RequestQueue;
1019 transition(I, L1_GETX) {
1020 a_broadcastLocalRequest;
1021 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1024 o_popL1RequestQueue;
// Replacing an idle entry: clean replacement path, then free the way.
1027 transition(I, L2_Replacement) {
1028 c_cleanReplacement; // Only needed in some cases
1029 rr_deallocateL2CacheBlock;
// Remote transient request: give up our collected tokens (data-less
// ACK) and also forward the request to local L1 sharers.
1032 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1034 t_sendAckWithCollectedTokens;
1035 j_forwardTransientRequestToLocalSharers;
// Persistent request: surrender collected tokens and enter the locked
// state I_L.
1039 transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
1040 e_sendAckWithCollectedTokens;
1041 l_popPersistentQueue;
// Incoming tokens/data while idle accumulate into the entry; the next
// state tracks what we now hold (shared data -> S, owner -> O, all
// tokens -> M).
1045 transition(I, Ack) {
1046 q_updateTokensFromResponse;
1050 transition(I, Data_Shared, S) {
1052 q_updateTokensFromResponse;
1056 transition(I, Writeback_Shared_Data, S) {
1058 q_updateTokensFromResponse;
1059 h_updateFilterFromL1HintOrWB;
1063 transition(I, Writeback_Tokens) {
1064 q_updateTokensFromResponse;
1065 h_updateFilterFromL1HintOrWB;
1069 transition(I, Data_Owner, O) {
1071 q_updateTokensFromResponse;
1075 transition(I, Writeback_Owned, O) {
1077 q_updateTokensFromResponse;
1078 h_updateFilterFromL1HintOrWB;
1082 transition(I, Data_All_Tokens, M) {
1084 q_updateTokensFromResponse;
1089 transition(I, Writeback_All_Tokens, M) {
1091 q_updateTokensFromResponse;
1092 h_updateFilterFromL1HintOrWB;
1096 // Transitions from Shared
// Shared: valid read-only data plus one or more (non-owner) tokens.
1098 transition(S, L2_Replacement, I) {
1100 rr_deallocateL2CacheBlock;
// Remote GETX: surrender all collected tokens and fall back to I; also
// forward the request to local L1 sharers.
1103 transition(S, Transient_GETX, I) {
1105 t_sendAckWithCollectedTokens;
1106 j_forwardTransientRequestToLocalSharers;
// Remote GETS: we are not the owner, so we keep our tokens and only
// forward the request to local sharers.
1110 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1111 j_forwardTransientRequestToLocalSharers;
// Persistent GETX: give up everything, lock to I_L. Persistent GETS:
// keep a single token for continued read access, lock to S_L.
1116 transition(S, Persistent_GETX, I_L) {
1117 e_sendAckWithCollectedTokens;
1118 l_popPersistentQueue;
1122 transition(S, Persistent_GETS, S_L) {
1123 f_sendAckWithAllButOneTokens;
1124 l_popPersistentQueue;
// Incoming tokens/data accumulate; data responses must match the copy
// we already hold (w_assert...). Owner token upgrades to O; collecting
// all tokens upgrades to M.
1128 transition(S, Ack) {
1129 q_updateTokensFromResponse;
1133 transition(S, Data_Shared) {
1134 w_assertIncomingDataAndCacheDataMatch;
1135 q_updateTokensFromResponse;
1139 transition(S, Writeback_Tokens) {
1140 q_updateTokensFromResponse;
1141 h_updateFilterFromL1HintOrWB;
1145 transition(S, Writeback_Shared_Data) {
1146 w_assertIncomingDataAndCacheDataMatch;
1147 q_updateTokensFromResponse;
1148 h_updateFilterFromL1HintOrWB;
1153 transition(S, Data_Owner, O) {
1154 w_assertIncomingDataAndCacheDataMatch;
1155 q_updateTokensFromResponse;
1159 transition(S, Writeback_Owned, O) {
1160 w_assertIncomingDataAndCacheDataMatch;
1161 q_updateTokensFromResponse;
1162 h_updateFilterFromL1HintOrWB;
1166 transition(S, Data_All_Tokens, M) {
1167 w_assertIncomingDataAndCacheDataMatch;
1168 q_updateTokensFromResponse;
1172 transition(S, Writeback_All_Tokens, M) {
1173 w_assertIncomingDataAndCacheDataMatch;
1174 q_updateTokensFromResponse;
1175 h_updateFilterFromL1HintOrWB;
// Local L1 GETX: hand our tokens to the L1, broadcast for the rest,
// and drop to I.
1179 transition(S, L1_GETX, I) {
1180 a_broadcastLocalRequest;
1181 tt_sendLocalAckWithCollectedTokens;
1185 o_popL1RequestQueue;
// Local L1 GETS: supply data (and a token) directly from this bank;
// giving away our last token drops us to I.
1189 transition(S, L1_GETS) {
1190 k_dataFromL2CacheToL1Requestor;
1193 o_popL1RequestQueue;
1196 transition(S, L1_GETS_Last_Token, I) {
1198 k_dataFromL2CacheToL1Requestor;
1201 o_popL1RequestQueue;
1204 // Transitions from Owned
// Owned: this bank holds the owner token (and responsibility for
// supplying data), possibly with additional tokens.
1206 transition(O, L2_Replacement, I) {
1207 cc_dirtyReplacement;
1208 rr_deallocateL2CacheBlock;
// Remote GETX: as owner we must send data plus ALL tokens, then drop
// to I; also forward to local L1 sharers.
1211 transition(O, Transient_GETX, I) {
1213 dd_sendDataWithAllTokens;
1214 j_forwardTransientRequestToLocalSharers;
// Persistent GETX: data + all tokens to the starver, lock to I_L.
1218 transition(O, Persistent_GETX, I_L) {
1219 ee_sendDataWithAllTokens;
1220 l_popPersistentQueue;
// Persistent GETS: keep one token for read access, lock to S_L.
1223 transition(O, Persistent_GETS, S_L) {
1224 ff_sendDataWithAllButOneTokens;
1225 l_popPersistentQueue;
// Remote GETS: as owner, respond with data and some tokens (we stay
// owner).
1228 transition(O, Transient_GETS) {
1229 // send multiple tokens
1231 d_sendDataWithTokens;
// Requestor needs our last token: deliberately ignore the transient
// request and wait for it to escalate to a persistent request.
1235 transition(O, Transient_GETS_Last_Token) {
1236 // WAIT FOR IT TO GO PERSISTENT
// Token accumulation while owner; collecting every token reaches M.
1241 transition(O, Ack) {
1242 q_updateTokensFromResponse;
1246 transition(O, Ack_All_Tokens, M) {
1247 q_updateTokensFromResponse;
1251 transition(O, Data_Shared) {
1252 w_assertIncomingDataAndCacheDataMatch;
1253 q_updateTokensFromResponse;
1258 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1259 w_assertIncomingDataAndCacheDataMatch;
1260 q_updateTokensFromResponse;
1261 h_updateFilterFromL1HintOrWB;
1265 transition(O, Data_All_Tokens, M) {
1266 w_assertIncomingDataAndCacheDataMatch;
1267 q_updateTokensFromResponse;
1271 transition(O, Writeback_All_Tokens, M) {
1272 w_assertIncomingDataAndCacheDataMatch;
1273 q_updateTokensFromResponse;
1274 h_updateFilterFromL1HintOrWB;
// Local L1 GETS: serve data locally; handing over the last (owner)
// token uses the owner-transfer action and drops us to I.
1278 transition(O, L1_GETS) {
1279 k_dataFromL2CacheToL1Requestor;
1282 o_popL1RequestQueue;
1285 transition(O, L1_GETS_Last_Token, I) {
1286 k_dataOwnerFromL2CacheToL1Requestor;
1289 o_popL1RequestQueue;
// Local L1 GETX: hand over data and ALL tokens; broadcast collects the
// remainder from the rest of the system.
1292 transition(O, L1_GETX, I) {
1293 a_broadcastLocalRequest;
1294 k_dataAndAllTokensFromL2CacheToL1Requestor;
1298 o_popL1RequestQueue;
1301 // Transitions from M
// Modified: this bank holds all tokens (exclusive) and the data.
1303 transition(M, L2_Replacement, I) {
1304 cc_dirtyReplacement;
1305 rr_deallocateL2CacheBlock;
1308 // MRM_DEBUG: Give up all tokens even for GETS? ???
// Remote transient request: send data with ALL tokens and drop to I
// (see the open question above about being less generous for GETS).
1309 transition(M, {Transient_GETX, Transient_GETS}, I) {
1311 dd_sendDataWithAllTokens;
// Persistent request: data + all tokens to the starver, lock to I_L.
1315 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1316 ee_sendDataWithAllTokens;
1317 l_popPersistentQueue;
// Local L1 GETS: keep ownership here, downgrade to O.
1321 transition(M, L1_GETS, O) {
1322 k_dataFromL2CacheToL1Requestor;
1325 o_popL1RequestQueue;
// Local L1 GETX: give the L1 data and every token; drop to I.
1328 transition(M, L1_GETX, I) {
1329 k_dataAndAllTokensFromL2CacheToL1Requestor;
1332 o_popL1RequestQueue;
1336 //Transitions from locked states
// I_L / S_L: a persistent request from another node has locked the
// block; anything we receive for it is bounced to the starver.
1338 transition({I_L, S_L}, Ack) {
1339 gg_bounceResponseToStarver;
1343 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1344 gg_bounceResponseToStarver;
// L1 writebacks while locked are redirected to the starver too, but we
// still update the sharer filter for the departing L1.
1348 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1349 gg_bounceWBSharedToStarver;
1350 h_updateFilterFromL1HintOrWB;
1354 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1355 gg_bounceWBOwnedToStarver;
1356 h_updateFilterFromL1HintOrWB;
// Replacement while locked frees the way and leaves the plain I state.
1360 transition(S_L, L2_Replacement, I) {
1362 rr_deallocateL2CacheBlock;
1365 transition(I_L, L2_Replacement, I) {
1366 rr_deallocateL2CacheBlock;
// Our own persistent request activating/deactivating releases the
// lock back to the corresponding unlocked state.
1369 transition(I_L, Own_Lock_or_Unlock, I) {
1370 l_popPersistentQueue;
1373 transition(S_L, Own_Lock_or_Unlock, S) {
1374 l_popPersistentQueue;
// Transient requests for a locked block are ignored; the requestor
// must fall back to the persistent mechanism.
1377 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
// Local L1 requests while locked-invalid: just broadcast on the L1's
// behalf (we have nothing to give).
1382 transition(I_L, {L1_GETX, L1_GETS}) {
1383 a_broadcastLocalRequest;
1386 o_popL1RequestQueue;
// L1 GETX while locked-shared: hand over our tokens, broadcast for the
// rest, and drop to locked-invalid.
1389 transition(S_L, L1_GETX, I_L) {
1390 a_broadcastLocalRequest;
1391 tt_sendLocalAckWithCollectedTokens;
1395 o_popL1RequestQueue;
// L1 GETS while locked-shared: we can still serve reads; giving away
// the last token drops us to locked-invalid.
1398 transition(S_L, L1_GETS) {
1399 k_dataFromL2CacheToL1Requestor;
1402 o_popL1RequestQueue;
1405 transition(S_L, L1_GETS_Last_Token, I_L) {
1406 k_dataFromL2CacheToL1Requestor;
1409 o_popL1RequestQueue;
// Further persistent requests while locked: a GETX takes our remaining
// tokens; otherwise just consume the persistent message.
1412 transition(S_L, Persistent_GETX, I_L) {
1413 e_sendAckWithCollectedTokens;
1414 l_popPersistentQueue;
1417 transition(S_L, Persistent_GETS) {
1418 l_popPersistentQueue;
1421 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1422 l_popPersistentQueue;