 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $Id: MOESI_CMP_token-L1cache.sm 1.22 05/01/19 15:55:39-06:00 beckmann@s0-28.cs.wisc.edu $
machine(L1Cache, "Token protocol")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   int l2_select_num_bits;
   Cycles l1_request_latency := 2;
   Cycles l1_response_latency := 2;
   int retry_threshold := 1;
   Cycles fixed_timeout_latency := 100;
   Cycles reissue_wakeup_latency := 10;
   Cycles use_timeout_latency := 50;
   bool dynamic_timeout_enabled := "True";
   bool no_mig_atomic := "True";
   // From this node's L1 cache TO the network
   // a local L1 -> this L2 bank
   MessageBuffer * responseFromL1Cache, network="To", virtual_network="4",
   MessageBuffer * persistentFromL1Cache, network="To", virtual_network="3",
        vnet_type="persistent";
   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
   MessageBuffer * requestFromL1Cache, network="To", virtual_network="1",

   // To this node's L1 cache FROM the network
   // a L2 bank -> this L1
   MessageBuffer * responseToL1Cache, network="From", virtual_network="4",
   MessageBuffer * persistentToL1Cache, network="From", virtual_network="3",
        vnet_type="persistent";
   // a L2 bank -> this L1
   MessageBuffer * requestToL1Cache, network="From", virtual_network="1",

   MessageBuffer * mandatoryQueue;
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    NP, AccessPermission:Invalid, "NP", desc="Not Present";
    I, AccessPermission:Invalid, "I", desc="Idle";
    S, AccessPermission:Read_Only, "S", desc="Shared";
    O, AccessPermission:Read_Only, "O", desc="Owned";
    M, AccessPermission:Read_Only, "M", desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, "MM", desc="Modified (dirty and locally modified)";
    M_W, AccessPermission:Read_Only, "M^W", desc="Modified (dirty), waiting";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Modified (dirty and locally modified), waiting";
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
    S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
    IM_L, AccessPermission:Busy, "IM^L", desc="Invalid, Locked, trying to go to Modified";
    SM_L, AccessPermission:Busy, "SM^L", desc="Shared, Locked, trying to go to Modified";
    IS_L, AccessPermission:Busy, "IS^L", desc="Invalid, Locked, trying to go to Shared";
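
  // Note on the state-name suffixes used above:
  //  - The *_W ("waiting") states are short lockout windows governed by the
  //    use timer: a freshly obtained modified block sits in M_W/MM_W and
  //    ignores transient requests until the timer expires, so the local
  //    processor is guaranteed a chance to use the block.
  //  - The *_L ("locked") states mean another node holds a persistent
  //    (starvation-avoidance) lock on the block; this cache holds few or no
  //    tokens and must defer to that starver until it deactivates the lock.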
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    Atomic, desc="Atomic request from the processor";
    L1_Replacement, desc="L1 Replacement";
    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_Local_GETX, desc="A GetX from another processor on this chip";
    Transient_GETS, desc="A GetS from another processor";
    Transient_Local_GETS, desc="A GetS from another processor on this chip";
    Transient_GETS_Last_Token, desc="A GetS from another processor; we hold our last token";
    Transient_Local_GETS_Last_Token, desc="A GetS from another processor on this chip; we hold our last token";
    // Lock/Unlock for distributed
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
    Own_Lock_or_Unlock, desc="This processor now has priority";
    Request_Timeout, desc="Reissue timer expired";
    Use_TimeoutStarverX, desc="Use timer expired while a persistent GETX starver is waiting";
    Use_TimeoutStarverS, desc="Use timer expired while a persistent GETS starver is waiting";
    Use_TimeoutNoStarvers, desc="Use timer expired with no starvers";
    Use_TimeoutNoStarvers_NoMig, desc="Use timer expired with no starvers; don't migrate (atomic)";
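
  // Background: in Token Coherence, "transient" requests are ordinary
  // best-effort GETS/GETX broadcasts that may fail to collect enough tokens
  // and are simply reissued after a timeout, while "persistent" requests are
  // the heavyweight starvation-avoidance mechanism: they lock the address at
  // every node (via the persistent table) until the starving requester has
  // collected the data and tokens it needs.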
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";

  structure(TBE, desc="...") {
    Addr addr, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
    Addr PC, desc="Program counter of request";
    bool WentPersistent, default="false", desc="Request went persistent";
    bool ExternalResponse, default="false", desc="Response came from an external controller";
    bool IsAtomic, default="false", desc="Request was an atomic request";
    AccessType TypeOfAccess, desc="Type of request (used for profiling)";
    Cycles IssueTime, desc="Time the request was issued";
    RubyAccessMode AccessMode, desc="user/supervisor access type";
    PrefetchBit Prefetch, desc="Is this a prefetch request";

  structure(TBETable, external="yes") {
    void deallocate(Addr);
    bool isPresent(Addr);

  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Addr, MachineID, AccessType);
    void persistentRequestUnlock(Addr, MachineID);
    bool okToIssueStarving(Addr, MachineID);
    MachineID findSmallest(Addr);
    AccessType typeOfSmallest(Addr);
    void markEntries(Addr);
    int countStarvingForAddress(Addr);
    int countReadStarvingForAddress(Addr);
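
  // Note: the PersistentTable records every outstanding persistent request
  // this node has observed for each address. findSmallest() returns the
  // requester with the smallest MachineID, which serves as the deterministic,
  // system-wide arbitration rule: only that node may treat the lock as its
  // own (Own_Lock_or_Unlock); every other node must forward data and tokens
  // toward it.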
  Tick cyclesToTicks(Cycles c);
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);

  TBETable L1_TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  PersistentTable persistentTable;
  TimerTable useTimerTable;
  TimerTable reissueTimerTable;

  int outstandingRequests, default="0";
  int outstandingPersistentRequests, default="0";

  // Constant that provides hysteresis when calculating the estimated average latency
  int averageLatencyHysteresis, default="(8)";
  Cycles averageLatencyCounter,
         default="(Cycles(500) << (*m_averageLatencyHysteresis_ptr))";

  Cycles averageLatencyEstimate() {
    DPRINTF(RubySlicc, "%d\n",
            (averageLatencyCounter >> averageLatencyHysteresis));
    return averageLatencyCounter >> averageLatencyHysteresis;

  void updateAverageLatencyEstimate(Cycles latency) {
    DPRINTF(RubySlicc, "%d\n", latency);

    // By subtracting the current average and then adding the most
    // recent sample, we calculate an estimate of the recent average.
    // If we simply used a running sum and divided by the total number
    // of entries, the estimate of the average would adapt very slowly
    // after the execution has run for a long time.
    // averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;

    averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
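
  // Worked example (illustrative numbers only): with averageLatencyHysteresis
  // = 8 the counter holds 2^8 = 256 times the current estimate, so each
  // update behaves like an exponential moving average with weight 1/256 on
  // the newest sample. Starting from the default counter of 500 << 8 = 128000
  // (estimate 500 cycles), observing a 756-cycle miss gives
  // counter = 128000 - 500 + 756 = 128256, i.e. a new estimate of
  // 128256 >> 8 = 501 cycles. This estimate feeds the dynamic reissue
  // timeout when dynamic_timeout_enabled is true.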
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
    if (is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
    return L1Icache_entry;

  void functionalRead(Addr addr, Packet *pkt) {
    testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;
    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;

  Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
    return L1Dcache_entry;

  Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
    return L1Icache_entry;

  int getTokens(Entry cache_entry) {
    if (is_valid(cache_entry)) {
      return cache_entry.Tokens;

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    if (persistentTable.isLocked(addr) && (persistentTable.findSmallest(addr) != machineID)) {
      // Not in cache, in persistent table, but this processor isn't highest priority
  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

      assert(state != State:I);
      assert(state != State:S);
      assert(state != State:O);
      assert(state != State:MM);
      assert(state != State:M);
      tbe.TBEState := state;

    if (is_valid(cache_entry)) {
      // Make sure the token count is in range
      assert(cache_entry.Tokens >= 0);
      assert(cache_entry.Tokens <= max_tokens());
      assert(cache_entry.Tokens != (max_tokens() / 2));

      if ((state == State:I_L) ||
          (state == State:IM_L) ||
          (state == State:IS_L)) {
        // Make sure we have no tokens in the "Invalid, locked" states
        assert(cache_entry.Tokens == 0);

        // Make sure the line is locked
        // assert(persistentTable.isLocked(addr));

        // But we shouldn't have highest priority for it
        // assert(persistentTable.findSmallest(addr) != id);

      } else if ((state == State:S_L) ||
                 (state == State:SM_L)) {
        assert(cache_entry.Tokens >= 1);
        assert(cache_entry.Tokens < (max_tokens() / 2));

        // Make sure the line is locked...
        // assert(persistentTable.isLocked(addr));

        // ...But we shouldn't have highest priority for it...
        // assert(persistentTable.findSmallest(addr) != id);

        // ...And it must be a GETS request
        // assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);

      // If there is an entry in the persistent table of this block,
      // this processor needs to have an entry in the table for this
      // block, and that entry better be the smallest (highest
      // priority). Otherwise, the state should have been one of
      //if (persistentTable.isLocked(addr)) {
      //  assert(persistentTable.findSmallest(addr) == id);

      // in the M-like states (M, MM, M_W, MM_W) you have all the tokens
      if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
        assert(cache_entry.Tokens == max_tokens());

      // in NP you have no tokens
      if (state == State:NP) {
        assert(cache_entry.Tokens == 0);

      // You have at least one token in S-like states
      if (state == State:S || state == State:SM) {
        assert(cache_entry.Tokens > 0);

      // You have more than half the tokens in O-like states
      if (state == State:O || state == State:OM) {
        assert(cache_entry.Tokens > (max_tokens() / 2));

      cache_entry.CacheState := state;
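
      // Note: the token count doubles as the ownership encoding. Holding the
      // owner token is represented as holding more than half of max_tokens(),
      // which is why a count of exactly max_tokens()/2 is asserted impossible
      // above. As an illustration (the actual max_tokens() value is
      // configuration dependent), with max_tokens() = 1024 a plain sharer
      // holds between 1 and 511 tokens, the owner token alone counts as 513,
      // and the M/MM states require all 1024.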
  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := L1_TBEs[addr];
      return L1Cache_State_to_permission(tbe.TBEState);

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);

    return AccessPermission:NotPresent;

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
    } else if (type == RubyRequestType:IFETCH) {
    } else if (type == RubyRequestType:ST) {
    } else if (type == RubyRequestType:ATOMIC) {
      error("Invalid RubyRequestType");

  AccessType cache_request_type_to_access_type(RubyRequestType type) {
    if ((type == RubyRequestType:LD) || (type == RubyRequestType:IFETCH)) {
      return AccessType:Read;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return AccessType:Write;
      error("Invalid RubyRequestType");

  // NOTE: direct local hits should not call this function
  bool isExternalHit(Addr addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
    } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
      if (sender == mapAddressToRange(addr, MachineType:L2Cache,
                    l2_select_low_bit, l2_select_num_bits, intToID(0))) {

  bool okToIssueStarving(Addr addr, MachineID machineID) {
    return persistentTable.okToIssueStarving(addr, machineID);

  void markPersistentEntries(Addr addr) {
    persistentTable.markEntries(addr);

  void setExternalResponse(TBE tbe) {
    assert(is_valid(tbe));
    tbe.ExternalResponse := true;

  bool IsAtomic(TBE tbe) {
    assert(is_valid(tbe));

  out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
  out_port(requestRecycle_out, RequestMsg, requestToL1Cache);
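
  // Note: two timer tables drive the in_ports below. The use timer
  // (useTimerTable) ends the M_W/MM_W lockout window after
  // use_timeout_latency cycles; which Use_Timeout* event fires depends on
  // whether a persistent starver is waiting and on whether the request was
  // atomic (no_mig_atomic). The reissue timer (reissueTimerTable) fires
  // Request_Timeout so an unanswered transient GETS/GETX can be reissued,
  // using either the dynamic average-latency estimate or
  // fixed_timeout_latency.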
  in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
    if (useTimerTable_in.isReady(clockEdge())) {
      Addr readyAddress := useTimerTable.nextAddress();
      TBE tbe := L1_TBEs.lookup(readyAddress);

      if (persistentTable.isLocked(readyAddress) &&
          (persistentTable.findSmallest(readyAddress) != machineID)) {
        if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
          trigger(Event:Use_TimeoutStarverX, readyAddress,
                  getCacheEntry(readyAddress), tbe);
          trigger(Event:Use_TimeoutStarverS, readyAddress,
                  getCacheEntry(readyAddress), tbe);
        if (no_mig_atomic && IsAtomic(tbe)) {
          trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
                  getCacheEntry(readyAddress), tbe);
          trigger(Event:Use_TimeoutNoStarvers, readyAddress,
                  getCacheEntry(readyAddress), tbe);

  in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
    Tick current_time := clockEdge();
    if (reissueTimerTable_in.isReady(current_time)) {
      Addr addr := reissueTimerTable.nextAddress();
      trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
              L1_TBEs.lookup(addr));

  // Persistent Network
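  // Note: every persistent GETX/GETS activation and deactivation is broadcast
  // to all L1s. The port below first applies the lock/unlock to the local
  // persistentTable, then reacts: if this node is the smallest-ID
  // (highest-priority) requester it sees Own_Lock_or_Unlock; otherwise it
  // must service the starver through the Persistent_* events, handing over
  // data or tokens even from stable states.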
  in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
    if (persistentNetwork_in.isReady(clockEdge())) {
      peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        // Apply the lockdown or unlockdown message to the table
        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
          error("Unexpected message");

        // React to the message based on the current state of the table
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := L1_TBEs[in_msg.addr];

        if (persistentTable.isLocked(in_msg.addr)) {
          if (persistentTable.findSmallest(in_msg.addr) == machineID) {
            // Our Own Lock - this processor is highest priority
            trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
            if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
              if (getTokens(cache_entry) == 1 ||
                  getTokens(cache_entry) == (max_tokens() / 2) + 1) {
                trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
                trigger(Event:Persistent_GETS, in_msg.addr,
              trigger(Event:Persistent_GETX, in_msg.addr,
          // Unlock case - no entries in the table
          trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
  in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := L1_TBEs[in_msg.addr];

        // Mark TBE flag if response received off-chip. Use this to update average latency estimate
        if (machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache) {
          if (in_msg.Sender == mapAddressToRange(in_msg.addr,
                               MachineType:L2Cache, l2_select_low_bit,
                               l2_select_num_bits, intToID(0))) {

            // came from an off-chip L2 cache
            // L1_TBEs[in_msg.addr].ExternalResponse := true;
            // profile_offchipL2_response(in_msg.addr);
            // profile_onchipL2_response(in_msg.addr);
        } else if (machineIDToMachineType(in_msg.Sender) == MachineType:Directory) {
          setExternalResponse(tbe);
          // profile_memory_response(in_msg.addr);
        } else if (machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
          //if (isLocalProcessor(machineID, in_msg.Sender) == false) {
          //if (is_valid(tbe)) {
          //  tbe.ExternalResponse := true;
          //  profile_offchipL1_response(in_msg.addr);
          //  profile_onchipL1_response(in_msg.addr);
          error("unexpected SenderMachine");

        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Data_Shared, in_msg.addr, cache_entry, tbe);
            error("Unexpected message");
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry, tbe);
            error("Unexpected message");
  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := L1_TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          if (in_msg.isLocal) {
            trigger(Event:Transient_Local_GETX, in_msg.addr,
            trigger(Event:Transient_GETX, in_msg.addr,
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            if (in_msg.isLocal) {
              trigger(Event:Transient_Local_GETS_Last_Token, in_msg.addr,
              trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
            if (in_msg.isLocal) {
              trigger(Event:Transient_Local_GETS, in_msg.addr,
              trigger(Event:Transient_GETS, in_msg.addr,
          error("Unexpected message");
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        TBE tbe := L1_TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);

            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Dcache_entry, tbe);

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Icache_entry, tbe);
              // No room in the L1, so we need to make room
              trigger(Event:L1_Replacement,
                      L1Icache.cacheProbe(in_msg.LineAddress),
                      getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
                      L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);

          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, tbe);

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Dcache_entry, tbe);
              // No room in the L1, so we need to make room
              trigger(Event:L1_Replacement,
                      L1Dcache.cacheProbe(in_msg.LineAddress),
                      getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
                      L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
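
  // Note on the two issue actions below: a request is first sent as a
  // transient GETS/GETX, one copy to the home L2 bank and one broadcast, with
  // the local bit set, to the other L1s on the chip. Each reissue increments
  // tbe.IssueCount; once IssueCount reaches retry_threshold the requester
  // escalates to a persistent request (if okToIssueStarving allows it), which
  // is guaranteed to complete and therefore needs no further reissue timer.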
  action(a_issueReadRequest, "a", desc="Issue GETS") {
    assert(is_valid(tbe));
    if (tbe.IssueCount == 0) {
      // Update outstanding requests
      //profile_outstanding_request(outstandingRequests);
      outstandingRequests := outstandingRequests + 1;

    if (tbe.IssueCount >= retry_threshold) {
      // Issue a persistent request if possible
      if (okToIssueStarving(address, machineID) && (starving == false)) {
        enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
          out_msg.addr := address;
          out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);

          // Currently the configuration system limits the system to only one
          // chip. Therefore, if we assume one shared L2 cache, then only one
          // pertinent L2 cache exists.
          //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

          out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

          out_msg.Destination.add(map_Address_to_Directory(address));
          out_msg.MessageSize := MessageSizeType:Persistent_Control;
          out_msg.Prefetch := tbe.Prefetch;
          out_msg.AccessMode := tbe.AccessMode;

        markPersistentEntries(address);

        if (tbe.IssueCount == 0) {
          //profile_persistent_prediction(address, tbe.TypeOfAccess);

        // Update outstanding requests
        //profile_outstanding_persistent_request(outstandingPersistentRequests);
        outstandingPersistentRequests := outstandingPersistentRequests + 1;

        // Increment IssueCount
        tbe.IssueCount := tbe.IssueCount + 1;

        tbe.WentPersistent := true;

        // Do not schedule a wakeup, a persistent request will always complete
        // We'd like to issue a persistent request, but are not allowed
        // to issue a P.R. right now. Thus, we do not increment the
        // IssueCount.

        // Set a wakeup timer
        reissueTimerTable.set(
            address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));

      // Make a normal request
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address,
                                MachineType:L2Cache, l2_select_low_bit,
                                l2_select_num_bits, intToID(0)));

        out_msg.RetryNum := tbe.IssueCount;
        if (tbe.IssueCount == 0) {
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.MessageSize := MessageSizeType:Reissue_Control;
        out_msg.Prefetch := tbe.Prefetch;
        out_msg.AccessMode := tbe.AccessMode;

      // send to other local L1s, with local bit set
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        // Since only one chip, assuming all L1 caches are local
        //out_msg.Destination := getOtherLocalL1IDs(machineID);
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.remove(machineID);

        out_msg.RetryNum := tbe.IssueCount;
        out_msg.isLocal := true;
        if (tbe.IssueCount == 0) {
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := tbe.Prefetch;
        out_msg.AccessMode := tbe.AccessMode;

      // Increment IssueCount
      tbe.IssueCount := tbe.IssueCount + 1;

      // Set a wakeup timer

      if (dynamic_timeout_enabled) {
        reissueTimerTable.set(
            address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
        reissueTimerTable.set(
            address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
  action(b_issueWriteRequest, "b", desc="Issue GETX") {
    assert(is_valid(tbe));
    if (tbe.IssueCount == 0) {
      // Update outstanding requests
      //profile_outstanding_request(outstandingRequests);
      outstandingRequests := outstandingRequests + 1;

    if (tbe.IssueCount >= retry_threshold) {
      // Issue a persistent request if possible
      if (okToIssueStarving(address, machineID) && (starving == false)) {
        enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
          out_msg.addr := address;
          out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);

          // Currently the configuration system limits the system to only one
          // chip. Therefore, if we assume one shared L2 cache, then only one
          // pertinent L2 cache exists.
          //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

          out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

          out_msg.Destination.add(map_Address_to_Directory(address));
          out_msg.MessageSize := MessageSizeType:Persistent_Control;
          out_msg.Prefetch := tbe.Prefetch;
          out_msg.AccessMode := tbe.AccessMode;

        markPersistentEntries(address);

        // Update outstanding requests
        //profile_outstanding_persistent_request(outstandingPersistentRequests);
        outstandingPersistentRequests := outstandingPersistentRequests + 1;

        if (tbe.IssueCount == 0) {
          //profile_persistent_prediction(address, tbe.TypeOfAccess);

        // Increment IssueCount
        tbe.IssueCount := tbe.IssueCount + 1;

        tbe.WentPersistent := true;

        // Do not schedule a wakeup, a persistent request will always complete
        // We'd like to issue a persistent request, but are not allowed
        // to issue a P.R. right now. Thus, we do not increment the
        // IssueCount.

        // Set a wakeup timer
        reissueTimerTable.set(
            address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));

      // Make a normal request
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        out_msg.Destination.add(mapAddressToRange(address,
                                MachineType:L2Cache, l2_select_low_bit,
                                l2_select_num_bits, intToID(0)));

        out_msg.RetryNum := tbe.IssueCount;

        if (tbe.IssueCount == 0) {
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.MessageSize := MessageSizeType:Reissue_Control;
        out_msg.Prefetch := tbe.Prefetch;
        out_msg.AccessMode := tbe.AccessMode;

      // send to other local L1s too
      enqueue(requestNetwork_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.isLocal := true;

        // Since only one chip, assuming all L1 caches are local
        //out_msg.Destination := getOtherLocalL1IDs(machineID);
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.remove(machineID);

        out_msg.RetryNum := tbe.IssueCount;
        if (tbe.IssueCount == 0) {
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := tbe.Prefetch;
        out_msg.AccessMode := tbe.AccessMode;

      // Increment IssueCount
      tbe.IssueCount := tbe.IssueCount + 1;

      DPRINTF(RubySlicc, "incremented issue count to %d\n",

      // Set a wakeup timer
      if (dynamic_timeout_enabled) {
        reissueTimerTable.set(
            address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
        reissueTimerTable.set(
            address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
  action(c_ownedReplacement, "c", desc="Issue writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address,
                              MachineType:L2Cache, l2_select_low_bit,
                              l2_select_num_bits, intToID(0)));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Type := CoherenceResponseType:WB_OWNED;

      // always send the data?
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    cache_entry.Tokens := 0;

  action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
    // don't send writeback if replacing block with no tokens
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address,
                              MachineType:L2Cache, l2_select_low_bit,
                              l2_select_num_bits, intToID(0)));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      // assert(cache_entry.Dirty == false);
      out_msg.Dirty := false;

      out_msg.MessageSize := MessageSizeType:Writeback_Data;
      out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
    cache_entry.Tokens := 0;

  action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToRange(address,
                                MachineType:L2Cache, l2_select_low_bit,
                                l2_select_num_bits, intToID(0)));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        // assert(cache_entry.Dirty == false);
        out_msg.Dirty := false;

        // always send the data?
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:WB_TOKENS;
    cache_entry.Tokens := 0;

  action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Tokens := 1;
        out_msg.DataBlk := cache_entry.DataBlk;
        // out_msg.Dirty := cache_entry.Dirty;
        out_msg.Dirty := false;
        if (in_msg.isLocal) {
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
          out_msg.MessageSize := MessageSizeType:Response_Data;
    cache_entry.Tokens := cache_entry.Tokens - 1;
    assert(cache_entry.Tokens >= 1);
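
  // Note: N_tokens is a controller parameter (its declaration appears to have
  // been dropped from this listing) giving the number of tokens handed to a
  // remote GETS requester when this cache holds plenty of spares; shipping
  // several tokens at once lets that node satisfy later read misses locally
  // instead of re-requesting.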
  action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and up to N tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          if (in_msg.isLocal) {
            out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
            out_msg.MessageSize := MessageSizeType:Response_Data;
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else if (cache_entry.Tokens > 1) {
        enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          if (in_msg.isLocal) {
            out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
            out_msg.MessageSize := MessageSizeType:Response_Data;
        cache_entry.Tokens := cache_entry.Tokens - 1;
    // assert(cache_entry.Tokens >= 1);
  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens > (max_tokens() / 2));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.isLocal) {
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
          out_msg.MessageSize := MessageSizeType:Response_Data;
    cache_entry.Tokens := 0;

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        if (cache_entry.Tokens > (max_tokens() / 2)) {
          out_msg.Type := CoherenceResponseType:DATA_OWNER;
          out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Control;
    cache_entry.Tokens := 0;

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens > (max_tokens() / 2));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    cache_entry.Tokens := 0;
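
  // Note: the two actions below answer a persistent GETS without giving up
  // read permission entirely. The responder keeps either one token or
  // N_tokens, whichever the current token count allows, and ships the rest,
  // including the owner token when it holds one, to the starving node found
  // via persistentTable.findSmallest(address).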
  action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all but N (or one) of our tokens to the starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        if (cache_entry.Tokens > (max_tokens() / 2)) {
          out_msg.Type := CoherenceResponseType:DATA_OWNER;
          out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        if (cache_entry.Tokens > N_tokens) {
          out_msg.Tokens := cache_entry.Tokens - N_tokens;
          out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Control;
    if (cache_entry.Tokens > N_tokens) {
      cache_entry.Tokens := N_tokens;
      cache_entry.Tokens := 1;
  action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all but N (or one) of our tokens to the starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        out_msg.Tokens := cache_entry.Tokens - N_tokens;
        out_msg.Tokens := cache_entry.Tokens - 1;
      assert(out_msg.Tokens > (max_tokens() / 2));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
      cache_entry.Tokens := N_tokens;
      cache_entry.Tokens := 1;

  action(fo_sendDataWithOwnerToken, "fo", desc="Send data and owner tokens") {
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens;
      assert(out_msg.Tokens > (max_tokens() / 2));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    cache_entry.Tokens := 0;
  action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));

    peek(responseNetwork_in, ResponseMsg) {
      // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
            address, cache_entry.DataBlk);

    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           MachineType:L1Cache);

  action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
            address, cache_entry.DataBlk);

    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           MachineType:L1Cache);

  action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
            address, cache_entry.DataBlk);
    peek(responseNetwork_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.readCallback(address, cache_entry.DataBlk,
                             isExternalHit(address, in_msg.Sender),
                             machineIDToMachineType(in_msg.Sender));

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
            address, cache_entry.DataBlk);

    L1Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, false,
                            MachineType:L1Cache);
    cache_entry.Dirty := true;
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

  action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "Address: %#x, Data Block: %s\n",
            address, cache_entry.DataBlk);
    peek(responseNetwork_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.writeCallback(address, cache_entry.DataBlk,
                              isExternalHit(address, in_msg.Sender),
                              machineIDToMachineType(in_msg.Sender));

    cache_entry.Dirty := true;
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(L1_TBEs);
    L1_TBEs.allocate(address);
    set_tbe(L1_TBEs[address]);
    tbe.IssueCount := 0;
    peek(mandatoryQueue_in, RubyRequest) {
      tbe.PC := in_msg.ProgramCounter;
      tbe.TypeOfAccess := cache_request_type_to_access_type(in_msg.Type);
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        tbe.IsAtomic := true;
      tbe.Prefetch := in_msg.Prefetch;
      tbe.AccessMode := in_msg.AccessMode;
    tbe.IssueTime := curCycle();

  action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
    peek(mandatoryQueue_in, RubyRequest) {
      APPEND_TRANSITION_COMMENT(in_msg.LineAddress);

  action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);

  action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
    useTimerTable.unset(address);

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue(clockEdge());

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue(clockEdge());

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue(clockEdge());
  action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
    useTimerTable.set(
        address, clockEdge() + cyclesToTicks(use_timeout_latency));
  action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
    enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:INV;
      out_msg.Tokens := 0;
      out_msg.Sender := machineID;

      out_msg.Destination.add(mapAddressToRange(address,
                              MachineType:L2Cache, l2_select_low_bit,
                              l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;

  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);
      DPRINTF(RubySlicc, "L1 received tokens for address: %#x, tokens: %d\n",
              in_msg.addr, in_msg.Tokens);
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
      DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);

      if (cache_entry.Dirty == false && in_msg.Dirty) {
        cache_entry.Dirty := true;
  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    assert(is_valid(tbe));
    if (tbe.WentPersistent) {
      // assert(starving);
      outstandingRequests := outstandingRequests - 1;
      enqueue(persistentNetwork_out, PersistentMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                MachineType:L2Cache, l2_select_low_bit,
                                l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;

    // Update average latency
    if (tbe.IssueCount <= 1) {
      if (tbe.ExternalResponse) {
        updateAverageLatencyEstimate(curCycle() - tbe.IssueTime);

    //if (tbe.WentPersistent) {
    //  profile_token_retry(address, tbe.TypeOfAccess, 2);
    //  profile_token_retry(address, tbe.TypeOfAccess, 1);

    //profile_token_retry(address, tbe.TypeOfAccess, tbe.IssueCount);
    L1_TBEs.deallocate(address);
  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l1_response_latency) {
          out_msg.addr := address;
          if (cache_entry.Tokens > (max_tokens() / 2)) {
            out_msg.Type := CoherenceResponseType:DATA_OWNER;
            out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.MessageSize := MessageSizeType:Response_Control;
    cache_entry.Tokens := 0;

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if (cache_entry.Dirty == false && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    assert(getTokens(cache_entry) == 0);
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
      L1Icache.deallocate(address);
    unset_cache_entry();

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_valid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_valid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);

  action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
    ++L1Icache.demand_hits;

  action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
    ++L1Dcache.demand_hits;

  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.DataBlk == in_msg.DataBlk);

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    peek(mandatoryQueue_in, RubyRequest) {
      APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
    stall_and_wait(mandatoryQueue_in, address);

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
1590 //*****************************************************
1592 //*****************************************************
1594 // Transitions for Load/Store/L2_Replacement from transient states
1595 transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
1596 ta_traceStalledAddress;
1597 zz_stallAndWaitMandatoryQueue;
1600 transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, {Store, Atomic}) {
1601 zz_stallAndWaitMandatoryQueue;
1604 transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
1605 zz_stallAndWaitMandatoryQueue;
1609 transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
1610 l_popPersistentQueue;
1613 // Transitions from NP
1614 transition(NP, Load, IS) {
1615 ii_allocateL1DCacheBlock;
1619 k_popMandatoryQueue;
1622 transition(NP, Ifetch, IS) {
1623 pp_allocateL1ICacheBlock;
1627 k_popMandatoryQueue;
1630 transition(NP, {Store, Atomic}, IM) {
1631 ii_allocateL1DCacheBlock;
1633 b_issueWriteRequest;
1635 k_popMandatoryQueue;
1638 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
1643 transition(NP, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) {
1647 transition(NP, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
1648 l_popPersistentQueue;
1651 // Transitions from Idle
1652 transition(I, Load, IS) {
1656 k_popMandatoryQueue;
1659 transition(I, Ifetch, IS) {
1663 k_popMandatoryQueue;
1666 transition(I, {Store, Atomic}, IM) {
1668 b_issueWriteRequest;
1670 k_popMandatoryQueue;
1673 transition(I, L1_Replacement) {
1674 ta_traceStalledAddress;
1675 tr_tokenReplacement;
1676 gg_deallocateL1CacheBlock;
1677 ka_wakeUpAllDependents;
1680 transition(I, {Transient_GETX, Transient_Local_GETX}) {
1681 t_sendAckWithCollectedTokens;
1685 transition(I, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
1689 transition(I, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
1690 e_sendAckWithCollectedTokens;
1691 l_popPersistentQueue;
1694 transition(I_L, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}) {
1695 l_popPersistentQueue;
1698 transition(I, Ack) {
1699 q_updateTokensFromResponse;
1703 transition(I, Data_Shared, S) {
1705 q_updateTokensFromResponse;
1709 transition(I, Data_Owner, O) {
1711 q_updateTokensFromResponse;
1715 transition(I, Data_All_Tokens, M) {
1717 q_updateTokensFromResponse;
1721 // Transitions from Shared
1722 transition({S, SM, S_L, SM_L}, Load) {
1725 k_popMandatoryQueue;
1728 transition({S, SM, S_L, SM_L}, Ifetch) {
1731 k_popMandatoryQueue;
1734 transition(S, {Store, Atomic}, SM) {
1736 b_issueWriteRequest;
1738 k_popMandatoryQueue;
1741 transition(S, L1_Replacement, I) {
1742 ta_traceStalledAddress;
1743 cc_sharedReplacement; // Only needed in some cases
1744 forward_eviction_to_cpu;
1745 gg_deallocateL1CacheBlock;
1746 ka_wakeUpAllDependents;
1749 transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
1750 t_sendAckWithCollectedTokens;
1751 p_informL2AboutTokenLoss;
1752 forward_eviction_to_cpu
1756 // only owner responds to non-local requests
1757 transition(S, Transient_GETS) {
1761 transition(S, Transient_Local_GETS) {
1762 d_sendDataWithToken;
1766 transition(S, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
1770 transition({S, S_L}, Persistent_GETX, I_L) {
1771 e_sendAckWithCollectedTokens;
1772 p_informL2AboutTokenLoss;
1773 forward_eviction_to_cpu
1774 l_popPersistentQueue;
1777 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1778 f_sendAckWithAllButNorOneTokens;
1779 l_popPersistentQueue;
1782 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1783 l_popPersistentQueue;
1786 transition(S, Ack) {
1787 q_updateTokensFromResponse;
1791 transition(S, Data_Shared) {
1792 w_assertIncomingDataAndCacheDataMatch;
1793 q_updateTokensFromResponse;
1797 transition(S, Data_Owner, O) {
1798 w_assertIncomingDataAndCacheDataMatch;
1799 q_updateTokensFromResponse;
1803 transition(S, Data_All_Tokens, M) {
1804 w_assertIncomingDataAndCacheDataMatch;
1805 q_updateTokensFromResponse;
1809 // Transitions from Owned
1810 transition({O, OM}, Ifetch) {
1813 k_popMandatoryQueue;
1816 transition({O, OM}, Load) {
1819 k_popMandatoryQueue;
1822 transition(O, {Store, Atomic}, OM) {
1824 b_issueWriteRequest;
1826 k_popMandatoryQueue;
1829 transition(O, L1_Replacement, I) {
1830 ta_traceStalledAddress;
1832 forward_eviction_to_cpu
1833 gg_deallocateL1CacheBlock;
1834 ka_wakeUpAllDependents;
1837 transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
1838 dd_sendDataWithAllTokens;
1839 p_informL2AboutTokenLoss;
1840 forward_eviction_to_cpu
1844 transition(O, Persistent_GETX, I_L) {
1845 ee_sendDataWithAllTokens;
1846 p_informL2AboutTokenLoss;
1847 forward_eviction_to_cpu
1848 l_popPersistentQueue;
1851 transition(O, Persistent_GETS, S_L) {
1852 ff_sendDataWithAllButNorOneTokens;
1853 l_popPersistentQueue;
1856 transition(O, Persistent_GETS_Last_Token, I_L) {
1857 fo_sendDataWithOwnerToken;
1858 forward_eviction_to_cpu
1859 l_popPersistentQueue;
1862 transition(O, Transient_GETS) {
1863 d_sendDataWithToken;
1867 transition(O, Transient_Local_GETS) {
1868 d_sendDataWithToken;
1872 // ran out of tokens, wait for it to go persistent
1873 transition(O, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
1877 transition(O, Ack) {
1878 q_updateTokensFromResponse;
  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
  }

  // Transitions from Modified
  transition({MM, MM_W}, Ifetch) {
    k_popMandatoryQueue;
  }

  transition({MM, MM_W}, Load) {
    k_popMandatoryQueue;
  }

  transition({MM_W}, {Store, Atomic}) {
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    k_popMandatoryQueue;
  }

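  // Atomics drop MM back to M so the block is not treated as migratory
  // (see the _NoMig use-timeout below).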
  transition(MM, Atomic, M) {
    k_popMandatoryQueue;
  }

  transition(MM, L1_Replacement, I) {
    ta_traceStalledAddress;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
    dd_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    forward_eviction_to_cpu;
    m_popRequestQueue;
  }

  transition({MM_W}, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
    m_popRequestQueue;
  }

  // Implement the migratory sharing optimization, even for persistent requests
  transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
    ee_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    forward_eviction_to_cpu;
    l_popPersistentQueue;
  }

  // ignore persistent requests in lockout period
  transition(MM_W, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

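  // The lockout (^W) window ends when the use timeout fires; with no starver
  // waiting, the line simply settles into MM (or M for the no-migration case).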
  transition(MM_W, Use_TimeoutNoStarvers, MM) {
    kd_wakeUpDependents;
  }

  transition(MM_W, Use_TimeoutNoStarvers_NoMig, M) {
    kd_wakeUpDependents;
  }

  // Transitions from Dirty Exclusive
  transition({M, M_W}, Ifetch) {
    k_popMandatoryQueue;
  }

  transition({M, M_W}, Load) {
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    k_popMandatoryQueue;
  }

  transition(M, Atomic) {
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    k_popMandatoryQueue;
  }

  transition(M_W, Atomic) {
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, I) {
    ta_traceStalledAddress;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
    dd_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    forward_eviction_to_cpu;
    m_popRequestQueue;
  }

  transition(M, Transient_Local_GETS, O) {
    d_sendDataWithToken;
    m_popRequestQueue;
  }

  transition(M, Transient_GETS, O) {
    d_sendDataWithNTokenIfAvail;
    m_popRequestQueue;
  }

  transition(M_W, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
    m_popRequestQueue;
  }

  transition(M, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    forward_eviction_to_cpu;
    l_popPersistentQueue;
  }

  transition(M, Persistent_GETS, S_L) {
    ff_sendDataWithAllButNorOneTokens;
    l_popPersistentQueue;
  }

  // ignore persistent requests in lockout period
  transition(M_W, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

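  // If a starver was recorded while the use timer ran, service it as soon as
  // the timer fires instead of keeping the block.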
  transition(M_W, Use_TimeoutStarverS, S_L) {
    ff_sendDataWithAllButNorOneTokens;
  }

  // someone unlocked during timeout
  transition(M_W, {Use_TimeoutNoStarvers, Use_TimeoutNoStarvers_NoMig}, M) {
    kd_wakeUpDependents;
  }

  transition(M_W, Use_TimeoutStarverX, I_L) {
    ee_sendDataWithAllTokens;
    forward_eviction_to_cpu;
    p_informL2AboutTokenLoss;
  }

  transition(MM_W, {Use_TimeoutStarverX, Use_TimeoutStarverS}, I_L) {
    ee_sendDataWithAllTokens;
    forward_eviction_to_cpu;
    p_informL2AboutTokenLoss;
  }

  // Transient_GETX and Transient_GETS in transient states
  transition(OM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
    m_popRequestQueue; // Even if we have the data, we can pretend we don't have it yet.
  }

  transition(IS, {Transient_GETX, Transient_Local_GETX}) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition(IS, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
    m_popRequestQueue;
  }

  transition(IS, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IS_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition(IM, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IM_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition({SM, SM_L}, Persistent_GETX, IM_L) {
    e_sendAckWithCollectedTokens;
    forward_eviction_to_cpu;
    l_popPersistentQueue;
  }

  transition(SM, {Persistent_GETS, Persistent_GETS_Last_Token}, SM_L) {
    f_sendAckWithAllButNorOneTokens;
    l_popPersistentQueue;
  }

  transition(SM_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

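  // A persistent GETX that hits OM takes the data and all tokens; the pending
  // store falls back to the locked IM state.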
  transition(OM, Persistent_GETX, IM_L) {
    ee_sendDataWithAllTokens;
    forward_eviction_to_cpu;
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETS, SM_L) {
    ff_sendDataWithAllButNorOneTokens;
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETS_Last_Token, IM_L) {
    fo_sendDataWithOwnerToken;
    l_popPersistentQueue;
  }

  // Transitions from IM/SM

  transition({IM, SM}, Ack) {
    q_updateTokensFromResponse;
  }

  transition(IM, Data_Shared, SM) {
    q_updateTokensFromResponse;
  }

  transition(IM, Data_Owner, OM) {
    q_updateTokensFromResponse;
  }

  transition(IM, Data_All_Tokens, MM_W) {
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition(SM, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
  }

  transition(SM, Data_Owner, OM) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
  }

  transition(SM, Data_All_Tokens, MM_W) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) { // We don't have the data yet, but we might have collected some tokens. We give them up here to avoid livelock
    t_sendAckWithCollectedTokens;
    forward_eviction_to_cpu;
    m_popRequestQueue;
  }

  transition({IM, SM}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
    m_popRequestQueue;
  }

  transition({IM, SM}, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }

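  // A reissue timeout means no response arrived in time; clear the timer and
  // issue the write request again.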
  // Transitions from OM

  transition(OM, Ack) {
    q_updateTokensFromResponse;
  }

  transition(OM, Ack_All_Tokens, MM_W) {
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition(OM, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
  }

  transition(OM, Data_All_Tokens, MM_W) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition(OM, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }

  // Transitions from IS

  transition(IS, Ack) {
    q_updateTokensFromResponse;
  }

  transition(IS, Data_Shared, S) {
    q_updateTokensFromResponse;
    x_external_load_hit;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Owner, O) {
    q_updateTokensFromResponse;
    x_external_load_hit;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition(IS, Data_All_Tokens, M_W) {
    q_updateTokensFromResponse;
    x_external_load_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    kd_wakeUpDependents;
  }

  transition(IS, Request_Timeout) {
    j_unsetReissueTimer;
  }

  // Transitions from I_L

  transition(I_L, Load, IS_L) {
    ii_allocateL1DCacheBlock;
    k_popMandatoryQueue;
  }

  transition(I_L, Ifetch, IS_L) {
    pp_allocateL1ICacheBlock;
    k_popMandatoryQueue;
  }

  transition(I_L, {Store, Atomic}, IM_L) {
    ii_allocateL1DCacheBlock;
    b_issueWriteRequest;
    k_popMandatoryQueue;
  }

  // Transitions from S_L

  transition(S_L, {Store, Atomic}, SM_L) {
    b_issueWriteRequest;
    k_popMandatoryQueue;
  }

  // Other transitions from *_L states

  transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS, Transient_GETX, Transient_Local_GETX}) {
    m_popRequestQueue;
  }

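  // While locked out, any tokens or data that arrive here are bounced
  // straight on to the current starver.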
  transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
    g_bounceResponseToStarver;
  }

  transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
    g_bounceResponseToStarver;
  }

  transition({I_L, S_L}, Data_All_Tokens) {
    g_bounceResponseToStarver;
  }

  transition(IS_L, Request_Timeout) {
    j_unsetReissueTimer;
  }

  transition({IM_L, SM_L}, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }

  // Opportunistically complete the memory operation in the following
  // cases. Note: these transitions could just use
  // g_bounceResponseToStarver, but if we have the data and tokens, we
  // might as well complete the memory request while we have the
  // chance (and then immediately forward on the data)

  transition(IM_L, Data_All_Tokens, MM_W) {
    q_updateTokensFromResponse;
    xx_external_store_hit;
    j_unsetReissueTimer;
    o_scheduleUseTimeout;
    kd_wakeUpDependents;
  }

  transition(SM_L, Data_All_Tokens, S_L) {
    q_updateTokensFromResponse;
    xx_external_store_hit;
    ff_sendDataWithAllButNorOneTokens;
    j_unsetReissueTimer;
  }

  transition(IS_L, Data_Shared, I_L) {
    q_updateTokensFromResponse;
    x_external_load_hit;
    e_sendAckWithCollectedTokens;
    p_informL2AboutTokenLoss;
    j_unsetReissueTimer;
  }

  transition(IS_L, Data_Owner, I_L) {
    q_updateTokensFromResponse;
    x_external_load_hit;
    ee_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    j_unsetReissueTimer;
  }

  transition(IS_L, Data_All_Tokens, M_W) {
    q_updateTokensFromResponse;
    x_external_load_hit;
    j_unsetReissueTimer;
    o_scheduleUseTimeout;
    kd_wakeUpDependents;
  }

  // Own_Lock_or_Unlock
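  // The persistent lock observed for this block is now our own (or has been
  // released), so each *_L state returns to its unlocked counterpart and
  // dependent requests are woken up.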
  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(IM_L, Own_Lock_or_Unlock, IM) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(IS_L, Own_Lock_or_Unlock, IS) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(SM_L, Own_Lock_or_Unlock, SM) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }