2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// L2 bank controller for the MOESI token-coherence protocol.
// Parameters: the backing CacheMemory, request/response enqueue latencies,
// and a switch that lets this bank filter (not forward) transient requests
// when its local-sharer directory says no local L1 holds the block.
29 machine(L2Cache, "Token protocol")
30 : CacheMemory * L2cache;
32   Cycles l2_request_latency := 5;
33   Cycles l2_response_latency := 5;
34   bool filtering_enabled := "True";
37 // From local bank of L2 cache TO the network
39 // this L2 bank -> a local L1 || mod-directory
40 MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
41 ordered="false", vnet_type="response";
42 // this L2 bank -> mod-directory
43 MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
44 ordered="false", vnet_type="request";
45 // this L2 bank -> a local L1
46 MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
47 ordered="false", vnet_type="request";
50 // FROM the network to this local bank of L2 cache
52 // a local L1 || mod-directory -> this L2 bank
53 MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
54 ordered="false", vnet_type="response";
// Persistent (starvation-avoidance) requests must stay ordered.
55 MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
56 ordered="true", vnet_type="persistent";
57 // mod-directory -> this L2 bank
58 MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
59 ordered="false", vnet_type="request";
60 // a local L1 -> this L2 bank
61 MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
62 ordered="false", vnet_type="request";
// Stable states for an L2 block. The *_L states mean a persistent request
// from another node has locked the address (tokens must be surrendered).
66 state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
68 NP, AccessPermission:Invalid, desc="Not Present";
69 I, AccessPermission:Invalid, desc="Idle";
70 S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
71 O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
72 M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
// Locked variants: a remote persistent request holds priority for this line.
75 I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
76 S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
// Events that drive the L2 state machine: local L1 traffic, transient
// (best-effort) requests from other nodes, writebacks, token-carrying
// responses, and persistent (starvation-avoidance) requests.
80 enumeration(Event, desc="Cache events") {
83 L1_GETS, desc="local L1 GETS request";
84 L1_GETS_Last_Token, desc="local L1 GETS request";
85 L1_GETX, desc="local L1 GETX request";
86 L1_INV, desc="L1 no longer has tokens";
87 Transient_GETX, desc="A GetX from another processor";
88 Transient_GETS, desc="A GetS from another processor";
89 Transient_GETS_Last_Token, desc="A GetS from another processor";
91 // events initiated by this L2
92 L2_Replacement, desc="L2 Replacement", format="!r";
94 // events of external L2 responses
97 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
98 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
99 Writeback_All_Tokens, desc="Received a writeback from L1";
100 Writeback_Owned, desc="Received a writeback from L1";
103 Data_Shared, desc="Received a data message, we are now a sharer";
104 Data_Owner, desc="Received a data message, we are now the owner";
105 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
106 Ack, desc="Received an ack message";
107 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
110 Persistent_GETX, desc="Another processor has priority to read/write";
111 Persistent_GETS, desc="Another processor has priority to read";
112 Persistent_GETS_Last_Token, desc="Another processor has priority to read";
113 Own_Lock_or_Unlock, desc="This processor now has priority";
// Per-block L2 cache entry: protocol state, dirty bit, held token count,
// and the data block itself.
119 structure(Entry, desc="...", interface="AbstractCacheEntry") {
120 State CacheState, desc="cache state";
121 bool Dirty, desc="Is the data dirty (different than memory)?";
122 int Tokens, desc="The number of tokens we're holding for the line";
123 DataBlk DataBlk, desc="data for the block";
// Local-sharer directory entry: which local L1s (by NodeID) may hold the
// block, plus a hint that a local L1 likely holds it exclusively.
126 structure(DirEntry, desc="...") {
127 Set Sharers, desc="Set of the internal processors that want the block in shared state";
128 bool exclusive, default="false", desc="if local exclusive is likely";
// External interface: an unbounded tag store used for the local-sharer
// directory (localDirectory below).
131 structure(PerfectCacheMemory, external="yes") {
132 void allocate(Address);
133 void deallocate(Address);
134 DirEntry lookup(Address);
135 bool isTagPresent(Address);
// External interface: table of outstanding persistent requests per address.
// findSmallest returns the highest-priority (lowest-ID) starving requestor.
138 structure(PersistentTable, external="yes") {
139 void persistentRequestLock(Address, MachineID, AccessType);
140 void persistentRequestUnlock(Address, MachineID);
141 MachineID findSmallest(Address);
142 AccessType typeOfSmallest(Address);
143 void markEntries(Address);
144 bool isLocked(Address);
145 int countStarvingForAddress(Address);
146 int countReadStarvingForAddress(Address);
// Controller-local state: persistent-request table and the local L1 sharer
// directory; set/unset_cache_entry are SLICC-provided entry accessors.
149 PersistentTable persistentTable;
150 PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
152 void set_cache_entry(AbstractCacheEntry b);
153 void unset_cache_entry();
// Look up this address in the L2 tag array and down-cast to our Entry type.
155 Entry getCacheEntry(Address address), return_by_pointer="yes" {
156 Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
// Functional-access hook: return a reference to the cached data block.
160 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
161 return getCacheEntry(addr).DataBlk;
// Token count for an entry; valid entries report their Tokens field
// (the not-present fall-through is outside this excerpt — presumably 0).
164 int getTokens(Entry cache_entry) {
165 if (is_valid(cache_entry)) {
166 return cache_entry.Tokens;
// Current protocol state: the entry's state when present; for absent blocks
// a locked address maps to a locked state (branch body not visible here).
172 State getState(Entry cache_entry, Address addr) {
173 if (is_valid(cache_entry)) {
174 return cache_entry.CacheState;
175 } else if (persistentTable.isLocked(addr)) {
// Transition the entry to a new state, asserting the token-count invariants
// of the token protocol before committing the state change.
182 void setState(Entry cache_entry, Address addr, State state) {
184 if (is_valid(cache_entry)) {
185 // Make sure the token count is in range
186 assert(cache_entry.Tokens >= 0);
187 assert(cache_entry.Tokens <= max_tokens());
// Exactly half the tokens is never a legal count (owner token tips majority).
188 assert(cache_entry.Tokens != (max_tokens() / 2));
190 // Make sure we have no tokens in L
191 if ((state == State:I_L) ) {
192 assert(cache_entry.Tokens == 0);
195 // in M and E you have all the tokens
196 if (state == State:M ) {
197 assert(cache_entry.Tokens == max_tokens());
200 // in NP you have no tokens
201 if (state == State:NP) {
202 assert(cache_entry.Tokens == 0);
205 // You have at least one token in S-like states
206 if (state == State:S ) {
207 assert(cache_entry.Tokens > 0);
210 // You have at least half the token in O-like states
211 if (state == State:O ) {
212 assert(cache_entry.Tokens > (max_tokens() / 2));
215 cache_entry.CacheState := state;
// Map the block's protocol state to an access permission; absent blocks
// report NotPresent.
219 AccessPermission getAccessPermission(Address addr) {
220 Entry cache_entry := getCacheEntry(addr);
221 if(is_valid(cache_entry)) {
222 return L2Cache_State_to_permission(cache_entry.CacheState);
225 return AccessPermission:NotPresent;
// Keep the entry's access permission in sync with a pending state change.
228 void setAccessPermission(Entry cache_entry, Address addr, State state) {
229 if (is_valid(cache_entry)) {
230 cache_entry.changePermission(L2Cache_State_to_permission(state));
// Drop one local L1 from the sharer set; free the directory entry when the
// set becomes empty.
234 void removeSharer(Address addr, NodeID id) {
236 if (localDirectory.isTagPresent(addr)) {
237 localDirectory[addr].Sharers.remove(id);
238 if (localDirectory[addr].Sharers.count() == 0) {
239 localDirectory.deallocate(addr);
// True when the local directory records at least one local L1 sharer
// (false fall-throughs are outside this excerpt).
244 bool sharersExist(Address addr) {
245 if (localDirectory.isTagPresent(addr)) {
246 if (localDirectory[addr].Sharers.count() > 0) {
// True when the local directory hints that a local L1 likely holds the
// block exclusively (false fall-throughs are outside this excerpt).
258 bool exclusiveExists(Address addr) {
259 if (localDirectory.isTagPresent(addr)) {
260 if (localDirectory[addr].exclusive) {
272 // assumes that caller will check to make sure tag is present
273 Set getSharers(Address addr) {
274 return localDirectory[addr].Sharers;
// Record a local L1 as the (likely) exclusive writer: it becomes the sole
// sharer and the exclusive hint is set.
277 void setNewWriter(Address addr, NodeID id) {
278 if (localDirectory.isTagPresent(addr) == false) {
279 localDirectory.allocate(addr);
281 localDirectory[addr].Sharers.clear();
282 localDirectory[addr].Sharers.add(id);
283 localDirectory[addr].exclusive := true;
// Add a local L1 to the sharer set (the exclusive hint is deliberately left
// untouched — see the commented-out line).
286 void addNewSharer(Address addr, NodeID id) {
287 if (localDirectory.isTagPresent(addr) == false) {
288 localDirectory.allocate(addr);
290 localDirectory[addr].Sharers.add(id);
291 // localDirectory[addr].exclusive := false;
// Clear the exclusive hint if a directory entry exists; no-op otherwise.
294 void clearExclusiveBitIfExists(Address addr) {
295 if (localDirectory.isTagPresent(addr)) {
296 localDirectory[addr].exclusive := false;
// Outbound ports: global requests (to directory), local requests (to L1s),
// and responses.
301 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
302 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
303 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
309 // Persistent Network
// Updates the persistent table from lock/unlock messages, then triggers an
// event based on who now has priority and how many tokens we hold.
310 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
311 if (persistentNetwork_in.isReady()) {
312 peek(persistentNetwork_in, PersistentMsg) {
313 assert(in_msg.Destination.isElement(machineID));
315 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
316 persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
317 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
318 persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
319 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
320 persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
322 error("Unexpected message");
325 Entry cache_entry := getCacheEntry(in_msg.Addr);
326 // React to the message based on the current state of the table
327 if (persistentTable.isLocked(in_msg.Addr)) {
329 if (persistentTable.typeOfSmallest(in_msg.Addr) == AccessType:Read) {
// A reader-starver may be given all-but-one tokens; "last token" here means
// we hold exactly one token, or the owner token plus one.
330 if (getTokens(cache_entry) == 1 ||
331 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
332 trigger(Event:Persistent_GETS_Last_Token, in_msg.Addr,
335 trigger(Event:Persistent_GETS, in_msg.Addr, cache_entry);
338 trigger(Event:Persistent_GETX, in_msg.Addr, cache_entry);
342 trigger(Event:Own_Lock_or_Unlock, in_msg.Addr, cache_entry);
// Transient (best-effort) requests arriving from other nodes via the
// directory; GETS with our last token gets a distinct event.
350 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
351 if (requestNetwork_in.isReady()) {
352 peek(requestNetwork_in, RequestMsg) {
353 assert(in_msg.Destination.isElement(machineID));
355 Entry cache_entry := getCacheEntry(in_msg.Addr);
356 if (in_msg.Type == CoherenceRequestType:GETX) {
357 trigger(Event:Transient_GETX, in_msg.Addr, cache_entry);
358 } else if (in_msg.Type == CoherenceRequestType:GETS) {
359 if (getTokens(cache_entry) == 1) {
360 trigger(Event:Transient_GETS_Last_Token, in_msg.Addr,
364 trigger(Event:Transient_GETS, in_msg.Addr, cache_entry);
367 error("Unexpected message");
// Requests from local L1 caches; same last-token distinction as the
// persistent port (one token, or owner token plus one).
373 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
374 if (L1requestNetwork_in.isReady()) {
375 peek(L1requestNetwork_in, RequestMsg) {
376 assert(in_msg.Destination.isElement(machineID));
377 Entry cache_entry := getCacheEntry(in_msg.Addr);
378 if (in_msg.Type == CoherenceRequestType:GETX) {
379 trigger(Event:L1_GETX, in_msg.Addr, cache_entry);
380 } else if (in_msg.Type == CoherenceRequestType:GETS) {
381 if (getTokens(cache_entry) == 1 ||
382 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
383 trigger(Event:L1_GETS_Last_Token, in_msg.Addr, cache_entry);
386 trigger(Event:L1_GETS, in_msg.Addr, cache_entry);
389 error("Unexpected message");
// Responses (acks, data, L1 writebacks). The port splits on whether this
// message would complete the full token set; writebacks additionally require
// a cache slot (otherwise an L2_Replacement is triggered on the victim).
397 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
398 if (responseNetwork_in.isReady()) {
399 peek(responseNetwork_in, ResponseMsg) {
400 assert(in_msg.Destination.isElement(machineID));
401 Entry cache_entry := getCacheEntry(in_msg.Addr);
// Case 1: message does NOT bring us to all tokens.
403 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
404 if (in_msg.Type == CoherenceResponseType:ACK) {
405 assert(in_msg.Tokens < (max_tokens() / 2));
406 trigger(Event:Ack, in_msg.Addr, cache_entry);
407 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
408 trigger(Event:Data_Owner, in_msg.Addr, cache_entry);
409 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
410 trigger(Event:Data_Shared, in_msg.Addr, cache_entry);
411 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
412 in_msg.Type == CoherenceResponseType:WB_OWNED ||
413 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
415 if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {
417 // either room is available or the block is already present
419 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
420 assert(in_msg.Dirty == false);
421 trigger(Event:Writeback_Tokens, in_msg.Addr, cache_entry);
422 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
423 assert(in_msg.Dirty == false);
424 trigger(Event:Writeback_Shared_Data, in_msg.Addr, cache_entry);
426 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
427 //assert(in_msg.Dirty == false);
428 trigger(Event:Writeback_Owned, in_msg.Addr, cache_entry);
// No room: evict the victim chosen by cacheProbe before accepting the WB.
432 trigger(Event:L2_Replacement,
433 L2cache.cacheProbe(in_msg.Addr),
434 getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
436 } else if (in_msg.Type == CoherenceResponseType:INV) {
437 trigger(Event:L1_INV, in_msg.Addr, cache_entry);
439 error("Unexpected message");
// Case 2: message completes the full token set -> *_All_Tokens events.
442 if (in_msg.Type == CoherenceResponseType:ACK) {
443 assert(in_msg.Tokens < (max_tokens() / 2));
444 trigger(Event:Ack_All_Tokens, in_msg.Addr, cache_entry);
445 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
446 in_msg.Type == CoherenceResponseType:DATA_SHARED) {
447 trigger(Event:Data_All_Tokens, in_msg.Addr, cache_entry);
448 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
449 in_msg.Type == CoherenceResponseType:WB_OWNED ||
450 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
451 if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {
453 // either room is available or the block is already present
455 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
456 assert(in_msg.Dirty == false);
// A tokens-only WB can only complete the set if we already held data.
457 assert( (getState(cache_entry, in_msg.Addr) != State:NP)
458 && (getState(cache_entry, in_msg.Addr) != State:I) );
459 trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
460 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
461 assert(in_msg.Dirty == false);
462 trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
464 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
465 trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
469 trigger(Event:L2_Replacement,
470 L2cache.cacheProbe(in_msg.Addr),
471 getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
473 } else if (in_msg.Type == CoherenceResponseType:INV) {
474 trigger(Event:L1_INV, in_msg.Addr, cache_entry);
476 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
477 error("Unexpected message");
// Re-issue a local L1 request onto the global network toward the directory,
// preserving the original requestor and retry count.
487 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
489 peek(L1requestNetwork_in, RequestMsg) {
491 // if this is a retry or no local sharers, broadcast normally
492 enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
493 out_msg.Addr := in_msg.Addr;
494 out_msg.Type := in_msg.Type;
495 out_msg.Requestor := in_msg.Requestor;
496 out_msg.RetryNum := in_msg.RetryNum;
499 // If a statically shared L2 cache, then no other L2 caches can
502 //out_msg.Destination.broadcast(MachineType:L2Cache);
503 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
504 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
506 out_msg.Destination.add(map_Address_to_Directory(address));
507 out_msg.MessageSize := MessageSizeType:Request_Control;
508 out_msg.AccessMode := in_msg.AccessMode;
509 out_msg.Prefetch := in_msg.Prefetch;
513 //profile_filter_action(0);
// Forward an incoming response (tokens and any data) unchanged to the
// directory/memory — used when this bank cannot keep them.
518 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
519 peek(responseNetwork_in, ResponseMsg) {
520 // FIXME, should use a 3rd vnet
521 enqueue(responseNetwork_out, ResponseMsg, 1) {
522 out_msg.Addr := address;
523 out_msg.Type := in_msg.Type;
524 out_msg.Sender := machineID;
525 out_msg.Destination.add(map_Address_to_Directory(address));
526 out_msg.Tokens := in_msg.Tokens;
527 out_msg.MessageSize := in_msg.MessageSize;
528 out_msg.DataBlk := in_msg.DataBlk;
529 out_msg.Dirty := in_msg.Dirty;
// Replacement of a clean block: return any held tokens to the directory as
// a control-only ACK, then zero our count.
534 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
535 assert(is_valid(cache_entry));
536 if (cache_entry.Tokens > 0) {
537 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
538 out_msg.Addr := address;
539 out_msg.Type := CoherenceResponseType:ACK;
540 out_msg.Sender := machineID;
541 out_msg.Destination.add(map_Address_to_Directory(address));
542 out_msg.Tokens := cache_entry.Tokens;
543 out_msg.MessageSize := MessageSizeType:Writeback_Control;
545 cache_entry.Tokens := 0;
// Replacement of an owned block: send tokens (and data iff dirty) to the
// directory; clean owned blocks need only an ACK_OWNER control message.
549 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
550 assert(is_valid(cache_entry));
551 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
552 out_msg.Addr := address;
553 out_msg.Sender := machineID;
554 out_msg.Destination.add(map_Address_to_Directory(address));
555 out_msg.Tokens := cache_entry.Tokens;
556 out_msg.DataBlk := cache_entry.DataBlk;
557 out_msg.Dirty := cache_entry.Dirty;
559 if (cache_entry.Dirty) {
560 out_msg.MessageSize := MessageSizeType:Writeback_Data;
561 out_msg.Type := CoherenceResponseType:DATA_OWNER;
563 out_msg.MessageSize := MessageSizeType:Writeback_Control;
564 out_msg.Type := CoherenceResponseType:ACK_OWNER;
567 cache_entry.Tokens := 0;
// Answer a transient GETS with shared data: give away N_tokens when we hold
// plenty (more than N_tokens above the owner majority), otherwise one token.
570 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
571 peek(requestNetwork_in, RequestMsg) {
572 assert(is_valid(cache_entry));
573 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
574 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
575 out_msg.Addr := address;
576 out_msg.Type := CoherenceResponseType:DATA_SHARED;
577 out_msg.Sender := machineID;
578 out_msg.Destination.add(in_msg.Requestor);
579 out_msg.Tokens := N_tokens;
580 out_msg.DataBlk := cache_entry.DataBlk;
581 out_msg.Dirty := false;
582 out_msg.MessageSize := MessageSizeType:Response_Data;
584 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
// Fewer spare tokens: hand over a single token with the data.
587 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
588 out_msg.Addr := address;
589 out_msg.Type := CoherenceResponseType:DATA_SHARED;
590 out_msg.Sender := machineID;
591 out_msg.Destination.add(in_msg.Requestor);
593 out_msg.DataBlk := cache_entry.DataBlk;
594 out_msg.Dirty := false;
595 out_msg.MessageSize := MessageSizeType:Response_Data;
597 cache_entry.Tokens := cache_entry.Tokens - 1;
// Answer a transient GETX: surrender the data and every token we hold.
602 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
603 assert(is_valid(cache_entry));
604 peek(requestNetwork_in, RequestMsg) {
605 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
606 out_msg.Addr := address;
607 out_msg.Type := CoherenceResponseType:DATA_OWNER;
608 out_msg.Sender := machineID;
609 out_msg.Destination.add(in_msg.Requestor);
610 assert(cache_entry.Tokens >= 1);
611 out_msg.Tokens := cache_entry.Tokens;
612 out_msg.DataBlk := cache_entry.DataBlk;
613 out_msg.Dirty := cache_entry.Dirty;
614 out_msg.MessageSize := MessageSizeType:Response_Data;
617 cache_entry.Tokens := 0;
// Persistent-request service: ship all collected tokens (no data) to the
// highest-priority starving node.
620 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
621 assert(is_valid(cache_entry));
622 if (cache_entry.Tokens > 0) {
623 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
624 out_msg.Addr := address;
625 out_msg.Type := CoherenceResponseType:ACK;
626 out_msg.Sender := machineID;
627 out_msg.Destination.add(persistentTable.findSmallest(address));
628 assert(cache_entry.Tokens >= 1);
629 out_msg.Tokens := cache_entry.Tokens;
630 out_msg.MessageSize := MessageSizeType:Response_Control;
633 cache_entry.Tokens := 0;
// Persistent-request service: surrender data and every token to the
// highest-priority starving node.
636 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
637 assert(is_valid(cache_entry));
638 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
639 out_msg.Addr := address;
640 out_msg.Type := CoherenceResponseType:DATA_OWNER;
641 out_msg.Sender := machineID;
642 out_msg.Destination.add(persistentTable.findSmallest(address));
643 assert(cache_entry.Tokens >= 1);
644 out_msg.Tokens := cache_entry.Tokens;
645 out_msg.DataBlk := cache_entry.DataBlk;
646 out_msg.Dirty := cache_entry.Dirty;
647 out_msg.MessageSize := MessageSizeType:Response_Data;
649 cache_entry.Tokens := 0;
// Persistent GETS service when we are not the owner: keep one token so we
// can continue reading, send the rest (control-only) to the starver.
652 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
653 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
654 assert(is_valid(cache_entry));
655 assert(cache_entry.Tokens > 0);
656 if (cache_entry.Tokens > 1) {
657 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
658 out_msg.Addr := address;
659 out_msg.Type := CoherenceResponseType:ACK;
660 out_msg.Sender := machineID;
661 out_msg.Destination.add(persistentTable.findSmallest(address));
662 assert(cache_entry.Tokens >= 1);
663 out_msg.Tokens := cache_entry.Tokens - 1;
664 out_msg.MessageSize := MessageSizeType:Response_Control;
667 cache_entry.Tokens := 1;
// Persistent GETS service when we are the owner with spare tokens: pass
// ownership (data + owner token + extras) to the starver, keeping exactly
// one token so this bank can still read the line.
// Fix: desc said "out tokens" — corrected to "all our tokens but one",
// matching the sibling action f_sendAckWithAllButOneTokens.
670 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
671 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
672 assert(is_valid(cache_entry));
// Strictly more than the owner majority plus one: we can spare tokens.
673 assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
674 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
675 out_msg.Addr := address;
676 out_msg.Type := CoherenceResponseType:DATA_OWNER;
677 out_msg.Sender := machineID;
678 out_msg.Destination.add(persistentTable.findSmallest(address));
679 out_msg.Tokens := cache_entry.Tokens - 1;
680 out_msg.DataBlk := cache_entry.DataBlk;
681 out_msg.Dirty := cache_entry.Dirty;
682 out_msg.MessageSize := MessageSizeType:Response_Data;
684 cache_entry.Tokens := 1;
// Persistent GETS service when we hold exactly the owner majority plus one:
// we cannot keep a token, so surrender data and ALL tokens to the starver.
// Fix: desc was copied from ff ("...but one to starver") but this action
// sends every token (out_msg.Tokens := cache_entry.Tokens; count -> 0).
687 action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
688 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
689 assert(is_valid(cache_entry));
690 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
691 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
692 out_msg.Addr := address;
693 out_msg.Type := CoherenceResponseType:DATA_OWNER;
694 out_msg.Sender := machineID;
695 out_msg.Destination.add(persistentTable.findSmallest(address));
696 out_msg.Tokens := cache_entry.Tokens;
697 out_msg.DataBlk := cache_entry.DataBlk;
698 out_msg.Dirty := cache_entry.Dirty;
699 out_msg.MessageSize := MessageSizeType:Response_Data;
701 cache_entry.Tokens := 0;
// Redirect an incoming response, unchanged, to the highest-priority
// starving node instead of keeping it here.
706 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
707 // assert(persistentTable.isLocked(address));
708 peek(responseNetwork_in, ResponseMsg) {
709 // FIXME, should use a 3rd vnet in some cases
710 enqueue(responseNetwork_out, ResponseMsg, 1) {
711 out_msg.Addr := address;
712 out_msg.Type := in_msg.Type;
713 out_msg.Sender := machineID;
714 out_msg.Destination.add(persistentTable.findSmallest(address));
715 out_msg.Tokens := in_msg.Tokens;
716 out_msg.DataBlk := in_msg.DataBlk;
717 out_msg.Dirty := in_msg.Dirty;
718 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an L1 writeback to the starver, converting the WB message type
// to its response equivalent (shared data -> DATA_SHARED, tokens -> ACK).
723 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
724 //assert(persistentTable.isLocked(address));
725 peek(responseNetwork_in, ResponseMsg) {
726 // FIXME, should use a 3rd vnet in some cases
727 enqueue(responseNetwork_out, ResponseMsg, 1) {
728 out_msg.Addr := address;
729 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
730 out_msg.Type := CoherenceResponseType:DATA_SHARED;
732 assert(in_msg.Tokens < (max_tokens() / 2));
733 out_msg.Type := CoherenceResponseType:ACK;
735 out_msg.Sender := machineID;
736 out_msg.Destination.add(persistentTable.findSmallest(address));
737 out_msg.Tokens := in_msg.Tokens;
738 out_msg.DataBlk := in_msg.DataBlk;
739 out_msg.Dirty := in_msg.Dirty;
740 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an owned writeback to the starver as a DATA_OWNER response,
// transferring ownership directly.
745 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
746 // assert(persistentTable.isLocked(address));
747 peek(responseNetwork_in, ResponseMsg) {
748 // FIXME, should use a 3rd vnet in some cases
749 enqueue(responseNetwork_out, ResponseMsg, 1) {
750 out_msg.Addr := address;
751 out_msg.Type := CoherenceResponseType:DATA_OWNER;
752 out_msg.Sender := machineID;
753 out_msg.Destination.add(persistentTable.findSmallest(address));
754 out_msg.Tokens := in_msg.Tokens;
755 out_msg.DataBlk := in_msg.DataBlk;
756 out_msg.Dirty := in_msg.Dirty;
757 out_msg.MessageSize := in_msg.MessageSize;
// On a writeback/hint, remove the sending L1 from the local sharer filter.
763 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
764 peek(responseNetwork_in, ResponseMsg) {
765 removeSharer(in_msg.Addr, machineIDToNodeID(in_msg.Sender));
// Forward a transient request to local L1s — unless filtering is enabled,
// this is the first attempt (RetryNum == 0), and the filter says no local
// sharer exists, in which case the request is dropped here.
769 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
770 peek(requestNetwork_in, RequestMsg) {
771 if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) {
772 //profile_filter_action(1);
773 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
777 enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
778 out_msg.Addr := in_msg.Addr;
779 out_msg.Requestor := in_msg.Requestor;
782 // Currently assuming only one chip so all L1s are local
784 //out_msg.Destination := getLocalL1IDs(machineID);
785 out_msg.Destination.broadcast(MachineType:L1Cache);
786 out_msg.Destination.remove(in_msg.Requestor);
788 out_msg.Type := in_msg.Type;
789 out_msg.isLocal := false;
790 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
791 out_msg.AccessMode := in_msg.AccessMode;
792 out_msg.Prefetch := in_msg.Prefetch;
794 //profile_filter_action(0);
// L2 hit for a local L1 GETS: send clean shared data with a single token.
799 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
800 peek(L1requestNetwork_in, RequestMsg) {
801 assert(is_valid(cache_entry));
802 assert(cache_entry.Tokens > 0);
803 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
804 out_msg.Addr := address;
805 out_msg.Type := CoherenceResponseType:DATA_SHARED;
806 out_msg.Sender := machineID;
807 out_msg.Destination.add(in_msg.Requestor);
808 out_msg.DataBlk := cache_entry.DataBlk;
809 out_msg.Dirty := false;
810 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
813 cache_entry.Tokens := cache_entry.Tokens - 1;
// L2 hit where we hold exactly the owner majority plus one token: pass
// ownership (data + all our tokens) to the requesting local L1.
817 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
818 peek(L1requestNetwork_in, RequestMsg) {
819 assert(is_valid(cache_entry));
820 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
821 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
822 out_msg.Addr := address;
823 out_msg.Type := CoherenceResponseType:DATA_OWNER;
824 out_msg.Sender := machineID;
825 out_msg.Destination.add(in_msg.Requestor);
826 out_msg.DataBlk := cache_entry.DataBlk;
827 out_msg.Dirty := cache_entry.Dirty;
828 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
829 out_msg.Tokens := cache_entry.Tokens;
831 cache_entry.Tokens := 0;
// L2 hit for a local L1 GETX: hand over the data and every token we hold.
835 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
836 peek(L1requestNetwork_in, RequestMsg) {
837 assert(is_valid(cache_entry));
838 // assert(cache_entry.Tokens == max_tokens());
839 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
840 out_msg.Addr := address;
841 out_msg.Type := CoherenceResponseType:DATA_OWNER;
842 out_msg.Sender := machineID;
843 out_msg.Destination.add(in_msg.Requestor);
844 out_msg.DataBlk := cache_entry.DataBlk;
845 out_msg.Dirty := cache_entry.Dirty;
846 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
847 //out_msg.Tokens := max_tokens();
848 out_msg.Tokens := cache_entry.Tokens;
850 cache_entry.Tokens := 0;
// Queue-pop actions: dequeue the head message of each inbound port after
// the triggering transition has consumed it.
854 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
855 persistentNetwork_in.dequeue();
858 action(m_popRequestQueue, "m", desc="Pop request queue.") {
859 requestNetwork_in.dequeue();
862 action(n_popResponseQueue, "n", desc="Pop response queue") {
863 responseNetwork_in.dequeue();
866 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
867 L1requestNetwork_in.dequeue();
// Accumulate tokens from an incoming response; also latch the dirty bit for
// dirty ownership-carrying messages (see inline note on why it lives here).
871 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
872 peek(responseNetwork_in, ResponseMsg) {
873 assert(is_valid(cache_entry));
874 assert(in_msg.Tokens != 0);
875 cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
877 // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
878 // may not trigger this action.
879 if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
880 cache_entry.Dirty := true;
// Update the local sharer filter from an L1 request: GETX marks the
// requestor as the exclusive writer, GETS adds it as a sharer.
885 action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
886 peek(L1requestNetwork_in, RequestMsg) {
887 if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
888 if (in_msg.Type == CoherenceRequestType:GETX) {
889 setNewWriter(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
890 } else if (in_msg.Type == CoherenceRequestType:GETS) {
891 addNewSharer(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
// Clear the exclusive hint for this address in the local filter.
897 action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
898 clearExclusiveBitIfExists(address);
// Touch the line in the replacement policy when a local L1 hits in L2.
901 action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
902 peek(L1requestNetwork_in, RequestMsg) {
903 if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
904 (is_valid(cache_entry))) {
905 L2cache.setMRU(address);
// Hand all collected tokens (no data) to a transient requestor from the
// global network.
910 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
911 assert(is_valid(cache_entry));
912 if (cache_entry.Tokens > 0) {
913 peek(requestNetwork_in, RequestMsg) {
914 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
915 out_msg.Addr := address;
916 out_msg.Type := CoherenceResponseType:ACK;
917 out_msg.Sender := machineID;
918 out_msg.Destination.add(in_msg.Requestor);
919 assert(cache_entry.Tokens >= 1);
920 out_msg.Tokens := cache_entry.Tokens;
921 out_msg.MessageSize := MessageSizeType:Response_Control;
925 cache_entry.Tokens := 0;
// Same as t_sendAckWithCollectedTokens, but the requestor comes from the
// local L1 request port.
928 action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
929 assert(is_valid(cache_entry));
930 if (cache_entry.Tokens > 0) {
931 peek(L1requestNetwork_in, RequestMsg) {
932 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
933 out_msg.Addr := address;
934 out_msg.Type := CoherenceResponseType:ACK;
935 out_msg.Sender := machineID;
936 out_msg.Destination.add(in_msg.Requestor);
937 assert(cache_entry.Tokens >= 1);
938 out_msg.Tokens := cache_entry.Tokens;
939 out_msg.MessageSize := MessageSizeType:Response_Control;
943 cache_entry.Tokens := 0;
// Install response data into the cache block; the dirty bit is sticky —
// it is only ever raised here, never cleared.
946 action(u_writeDataToCache, "u", desc="Write data to cache") {
947 peek(responseNetwork_in, ResponseMsg) {
948 assert(is_valid(cache_entry));
949 cache_entry.DataBlk := in_msg.DataBlk;
950 if ((cache_entry.Dirty == false) && in_msg.Dirty) {
951 cache_entry.Dirty := in_msg.Dirty;
// Action: allocate a fresh cache entry for this address in the L2 and
// make it the transition's current cache_entry.
956 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
957 set_cache_entry(L2cache.allocate(address, new Entry));
// Action: free this address's L2 entry so the frame can be reused
// (replacement may then proceed in parallel with any outstanding fetch).
960 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
961 L2cache.deallocate(address);
// Action: bump the L2 demand-miss statistic.
965 action(uu_profileMiss, "\um", desc="Profile the demand miss") {
966 ++L2cache.demand_misses;
// Action: bump the L2 demand-hit statistic.
969 action(uu_profileHit, "\uh", desc="Profile the demand hit") {
970 ++L2cache.demand_hits;
// Action: sanity-check that a data-carrying response matches the data we
// already cache. ACK and WB_TOKENS responses are skipped because they
// carry tokens only, no data block to compare.
973 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
974 peek(responseNetwork_in, ResponseMsg) {
975 if (in_msg.Type != CoherenceResponseType:ACK &&
976 in_msg.Type != CoherenceResponseType:WB_TOKENS) {
977 assert(is_valid(cache_entry));
978 assert(cache_entry.DataBlk == in_msg.DataBlk);
984 //*****************************************************
986 //*****************************************************
// Transitions common to every base and locked state: an L1 invalidation
// hint updates the local sharer filter; a persistent lock/unlock aimed at
// this node while in a non-locked state just consumes the message.
988 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
990 h_updateFilterFromL1HintOrWB;
994 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
995 l_popPersistentQueue;
999 // Transitions from NP (not present): the bank holds no entry and no
// tokens. Transient requests are merely relayed to possible local L1
// sharers; L1 requests are broadcast outward; L1 writebacks allocate a
// fresh entry whose final state is chosen by how many tokens (and
// whether data/ownership) the writeback carries.
1001 transition(NP, {Transient_GETX, Transient_GETS}) {
1002 // forward message to local sharers
1004 j_forwardTransientRequestToLocalSharers;
1009 transition(NP, {L1_GETS, L1_GETX}) {
1010 a_broadcastLocalRequest;
1013 o_popL1RequestQueue;
// Stale responses arriving while not present are simply consumed
// (no actions are listed for this transition in the visible text).
1016 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
// Writeback with shared data: allocate, absorb tokens -> state S.
1021 transition(NP, Writeback_Shared_Data, S) {
1022 vv_allocateL2CacheBlock;
1024 q_updateTokensFromResponse;
1025 h_updateFilterFromL1HintOrWB;
// Writeback of bare tokens (no data): allocate, absorb tokens -> I.
1029 transition(NP, Writeback_Tokens, I) {
1030 vv_allocateL2CacheBlock;
1031 q_updateTokensFromResponse;
1032 h_updateFilterFromL1HintOrWB;
// Writeback carrying ALL tokens: this bank becomes the sole holder -> M.
1036 transition(NP, Writeback_All_Tokens, M) {
1037 vv_allocateL2CacheBlock;
1039 q_updateTokensFromResponse;
1040 h_updateFilterFromL1HintOrWB;
// Writeback with ownership (owner token + data) -> O.
1044 transition(NP, Writeback_Owned, O) {
1045 vv_allocateL2CacheBlock;
1047 q_updateTokensFromResponse;
1048 h_updateFilterFromL1HintOrWB;
// Persistent requests seen while not present: nothing to contribute,
// consume the persistent-queue entry. (Header line of this transition
// was lost in extraction; 1054 is its event list.)
1054 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1056 l_popPersistentQueue;
1059 // Transitions from Idle: an entry exists and may hold some tokens but
// no data/ownership. Collected tokens are forwarded to requestors; data
// responses upgrade the state (S on shared data, O on ownership, M when
// all tokens arrive).
1061 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1062 a_broadcastLocalRequest;
1063 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1066 o_popL1RequestQueue;
1069 transition(I, L1_GETX) {
1070 a_broadcastLocalRequest;
1071 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1074 o_popL1RequestQueue;
1077 transition(I, L2_Replacement) {
1078 c_cleanReplacement; // Only needed in some cases
1079 rr_deallocateL2CacheBlock;
// Transient request: give up collected tokens and relay the request to
// any local sharers the filter knows about.
1082 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1084 t_sendAckWithCollectedTokens;
1085 j_forwardTransientRequestToLocalSharers;
// Persistent request: send collected tokens to the starver. (Header line
// of this transition was lost in extraction; 1090 is its event list.)
1090 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1092 e_sendAckWithCollectedTokens;
1093 l_popPersistentQueue;
1097 transition(I, Ack) {
1098 q_updateTokensFromResponse;
1102 transition(I, Data_Shared, S) {
1104 q_updateTokensFromResponse;
1108 transition(I, Writeback_Shared_Data, S) {
1110 q_updateTokensFromResponse;
1111 h_updateFilterFromL1HintOrWB;
// Token-only writeback: stay in I, just accumulate the tokens.
1115 transition(I, Writeback_Tokens) {
1116 q_updateTokensFromResponse;
1117 h_updateFilterFromL1HintOrWB;
1121 transition(I, Data_Owner, O) {
1123 q_updateTokensFromResponse;
1127 transition(I, Writeback_Owned, O) {
1129 q_updateTokensFromResponse;
1130 h_updateFilterFromL1HintOrWB;
1134 transition(I, Data_All_Tokens, M) {
1136 q_updateTokensFromResponse;
1141 transition(I, Writeback_All_Tokens, M) {
1143 q_updateTokensFromResponse;
1144 h_updateFilterFromL1HintOrWB;
1148 // Transitions from Shared: the bank holds valid (clean) data plus one
// or more tokens. Incoming data is cross-checked against the cached copy;
// persistent GETS locks keep one token here (S_L) while persistent GETX
// takes everything (I_L).
1150 transition(S, L2_Replacement, I) {
1152 rr_deallocateL2CacheBlock;
// A transient exclusive request takes our collected tokens; we fall to I.
1155 transition(S, Transient_GETX, I) {
1157 t_sendAckWithCollectedTokens;
1158 j_forwardTransientRequestToLocalSharers;
// Transient shared requests are only relayed; we keep our tokens.
1162 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1163 j_forwardTransientRequestToLocalSharers;
1168 transition(S, Persistent_GETX, I_L) {
1169 e_sendAckWithCollectedTokens;
1170 l_popPersistentQueue;
// Persistent GETS: yield all but one token so we can stay a sharer (S_L).
1174 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1175 f_sendAckWithAllButOneTokens;
1176 l_popPersistentQueue;
1180 transition(S, Ack) {
1181 q_updateTokensFromResponse;
1185 transition(S, Data_Shared) {
1186 w_assertIncomingDataAndCacheDataMatch;
1187 q_updateTokensFromResponse;
1191 transition(S, Writeback_Tokens) {
1192 q_updateTokensFromResponse;
1193 h_updateFilterFromL1HintOrWB;
1197 transition(S, Writeback_Shared_Data) {
1198 w_assertIncomingDataAndCacheDataMatch;
1199 q_updateTokensFromResponse;
1200 h_updateFilterFromL1HintOrWB;
// Gaining the owner token upgrades us to O; all tokens upgrade to M.
1205 transition(S, Data_Owner, O) {
1206 w_assertIncomingDataAndCacheDataMatch;
1207 q_updateTokensFromResponse;
1211 transition(S, Writeback_Owned, O) {
1212 w_assertIncomingDataAndCacheDataMatch;
1213 q_updateTokensFromResponse;
1214 h_updateFilterFromL1HintOrWB;
1218 transition(S, Data_All_Tokens, M) {
1219 w_assertIncomingDataAndCacheDataMatch;
1220 q_updateTokensFromResponse;
1224 transition(S, Writeback_All_Tokens, M) {
1225 w_assertIncomingDataAndCacheDataMatch;
1226 q_updateTokensFromResponse;
1227 h_updateFilterFromL1HintOrWB;
// Local L1 wants exclusive: forward collected tokens locally, drop to I.
1231 transition(S, L1_GETX, I) {
1232 a_broadcastLocalRequest;
1233 tt_sendLocalAckWithCollectedTokens;
1237 o_popL1RequestQueue;
// Local L1 read: supply data from this bank; we stay S unless the L1
// takes our last token (then we fall to I).
1241 transition(S, L1_GETS) {
1242 k_dataFromL2CacheToL1Requestor;
1246 o_popL1RequestQueue;
1249 transition(S, L1_GETS_Last_Token, I) {
1251 k_dataFromL2CacheToL1Requestor;
1255 o_popL1RequestQueue;
1258 // Transitions from Owned: this bank holds the owner token and possibly
// dirty data, so it must supply data (not just tokens) to requestors and
// write back on replacement.
1260 transition(O, L2_Replacement, I) {
1261 cc_dirtyReplacement;
1262 rr_deallocateL2CacheBlock;
// Exclusive transient request: ship data plus every token, fall to I.
1265 transition(O, Transient_GETX, I) {
1267 dd_sendDataWithAllTokens;
1268 j_forwardTransientRequestToLocalSharers;
1272 transition(O, Persistent_GETX, I_L) {
1273 ee_sendDataWithAllTokens;
1274 l_popPersistentQueue;
// Persistent GETS: keep one token to remain a (locked) sharer.
1277 transition(O, Persistent_GETS, S_L) {
1278 ff_sendDataWithAllButOneTokens;
1279 l_popPersistentQueue;
// "Last token" persistent GETS: nothing can be retained, send all -> I_L.
1282 transition(O, Persistent_GETS_Last_Token, I_L) {
1283 fa_sendDataWithAllTokens;
1284 l_popPersistentQueue;
1287 transition(O, Transient_GETS) {
1288 // send multiple tokens
1290 d_sendDataWithTokens;
1294 transition(O, Transient_GETS_Last_Token) {
1295 // WAIT FOR IT TO GO PERSISTENT
1300 transition(O, Ack) {
1301 q_updateTokensFromResponse;
// Collecting the final token while owning the line promotes us to M.
1305 transition(O, Ack_All_Tokens, M) {
1306 q_updateTokensFromResponse;
1310 transition(O, Data_Shared) {
1311 w_assertIncomingDataAndCacheDataMatch;
1312 q_updateTokensFromResponse;
1317 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1318 w_assertIncomingDataAndCacheDataMatch;
1319 q_updateTokensFromResponse;
1320 h_updateFilterFromL1HintOrWB;
1324 transition(O, Data_All_Tokens, M) {
1325 w_assertIncomingDataAndCacheDataMatch;
1326 q_updateTokensFromResponse;
1330 transition(O, Writeback_All_Tokens, M) {
1331 w_assertIncomingDataAndCacheDataMatch;
1332 q_updateTokensFromResponse;
1333 h_updateFilterFromL1HintOrWB;
// Local L1 read: serve from this bank; if it takes the owner token /
// last token we fall to I (the "Owner" variant hands ownership over).
1337 transition(O, L1_GETS) {
1338 k_dataFromL2CacheToL1Requestor;
1342 o_popL1RequestQueue;
1345 transition(O, L1_GETS_Last_Token, I) {
1346 k_dataOwnerFromL2CacheToL1Requestor;
1350 o_popL1RequestQueue;
// Local L1 wants exclusive: give it data and ALL tokens, fall to I.
1353 transition(O, L1_GETX, I) {
1354 a_broadcastLocalRequest;
1355 k_dataAndAllTokensFromL2CacheToL1Requestor;
1359 o_popL1RequestQueue;
1362 // Transitions from M: this bank holds ALL tokens, so every external
// request is satisfied entirely from here and the bank leaves M.
1364 transition(M, L2_Replacement, I) {
1365 cc_dirtyReplacement;
1366 rr_deallocateL2CacheBlock;
1369 // MRM_DEBUG: Give up all tokens even for GETS? ???
1370 transition(M, {Transient_GETX, Transient_GETS}, I) {
1372 dd_sendDataWithAllTokens;
1376 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1377 ee_sendDataWithAllTokens;
1378 l_popPersistentQueue;
// Local L1 read: keep ownership here (drop to O), hand data to the L1.
1382 transition(M, L1_GETS, O) {
1383 k_dataFromL2CacheToL1Requestor;
1387 o_popL1RequestQueue;
// Local L1 exclusive: hand over data and all tokens, fall to I.
1390 transition(M, L1_GETX, I) {
1391 k_dataAndAllTokensFromL2CacheToL1Requestor;
1395 o_popL1RequestQueue;
1399 //Transitions from locked states: a persistent request has locked this
// line, so responses and writebacks are bounced on to the starving node
// rather than absorbed; Own_Lock_or_Unlock releases the lock.
1401 transition({I_L, S_L}, Ack) {
1402 gg_bounceResponseToStarver;
1406 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1407 gg_bounceResponseToStarver;
// L1 writebacks while locked are redirected to the starver too; the
// sharer filter is still kept up to date.
1411 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1412 gg_bounceWBSharedToStarver;
1413 h_updateFilterFromL1HintOrWB;
1417 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1418 gg_bounceWBOwnedToStarver;
1419 h_updateFilterFromL1HintOrWB;
1423 transition(S_L, L2_Replacement, I) {
1425 rr_deallocateL2CacheBlock;
1428 transition(I_L, L2_Replacement, I) {
1429 rr_deallocateL2CacheBlock;
// Lock released: return to the corresponding unlocked state.
1432 transition(I_L, Own_Lock_or_Unlock, I) {
1433 l_popPersistentQueue;
1436 transition(S_L, Own_Lock_or_Unlock, S) {
1437 l_popPersistentQueue;
// Transient requests while locked are ignored (persistent wins; no
// actions are listed for this transition in the visible text).
1440 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
1445 transition(I_L, {L1_GETX, L1_GETS}) {
1446 a_broadcastLocalRequest;
1449 o_popL1RequestQueue;
// S_L keeps one token; a local GETX hands our collected tokens to the
// L1 and drops to I_L.
1452 transition(S_L, L1_GETX, I_L) {
1453 a_broadcastLocalRequest;
1454 tt_sendLocalAckWithCollectedTokens;
1458 o_popL1RequestQueue;
1461 transition(S_L, L1_GETS) {
1462 k_dataFromL2CacheToL1Requestor;
1466 o_popL1RequestQueue;
1469 transition(S_L, L1_GETS_Last_Token, I_L) {
1470 k_dataFromL2CacheToL1Requestor;
1474 o_popL1RequestQueue;
// A later persistent GETX supersedes our retained token.
1477 transition(S_L, Persistent_GETX, I_L) {
1478 e_sendAckWithCollectedTokens;
1479 l_popPersistentQueue;
// Already locked for GETS / nothing to add: just consume the message.
1482 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1483 l_popPersistentQueue;
1486 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1487 l_popPersistentQueue;