3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * $Id: MOESI_CMP_token-L1cache.sm 1.22 05/01/19 15:55:39-06:00 beckmann@s0-28.cs.wisc.edu $
35 machine(L1Cache, "Token protocol")
36 : Sequencer * sequencer,
37 CacheMemory * L1IcacheMemory,
38 CacheMemory * L1DcacheMemory,
39 int l2_select_num_bits,
41 int l1_request_latency = 2,
42 int l1_response_latency = 2,
43 int retry_threshold = 1,
44 int fixed_timeout_latency = 100,
45 bool dynamic_timeout_enabled = true,
46 bool no_mig_atomic = true
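// Parameter summary (inferred from how the parameters are used below):
//   l1_request_latency / l1_response_latency: enqueue latencies for outgoing
//     request and response messages.
//   retry_threshold: number of transient (re)issues of a request before the
//     cache escalates to a persistent request.
//   fixed_timeout_latency: reissue timeout, in cycles, used when
//     dynamic_timeout_enabled is false; otherwise the timeout tracks the
//     running average miss latency.
//   no_mig_atomic: if true, blocks written by atomic accesses are not left in
//     the migratory (MM) state, avoiding needless ownership migration.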
49 // From this node's L1 cache TO the network
51 // a local L1 -> this L2 bank
52 MessageBuffer responseFromL1Cache, network="To", virtual_network="4", ordered="false";
53 MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true";
54 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
55 MessageBuffer requestFromL1Cache, network="To", virtual_network="1", ordered="false";
58 // To this node's L1 cache FROM the network
59 // an L2 bank -> this L1
60 MessageBuffer responseToL1Cache, network="From", virtual_network="4", ordered="false";
61 MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true";
62 // an L2 bank -> this L1
63 MessageBuffer requestToL1Cache, network="From", virtual_network="1", ordered="false";
66 enumeration(State, desc="Cache states", default="L1Cache_State_I") {
68 NP, "NP", desc="Not Present";
70 S, "S", desc="Shared";
72 M, "M", desc="Modified (dirty)";
73 MM, "MM", desc="Modified (dirty and locally modified)";
74 M_W, "M^W", desc="Modified (dirty), waiting";
75 MM_W, "MM^W", desc="Modified (dirty and locally modified), waiting";
78 IM, "IM", desc="Issued GetX";
79 SM, "SM", desc="Issued GetX, we still have an old copy of the line";
80 OM, "OM", desc="Issued GetX, received data";
81 IS, "IS", desc="Issued GetS";
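// Locked states: another processor has an active persistent request for this
// block and this cache is not the highest-priority requester, so it gives up
// (or limits) its tokens until the persistent lock is released.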
84 I_L, "I^L", desc="Invalid, Locked";
85 S_L, "S^L", desc="Shared, Locked";
86 IM_L, "IM^L", desc="Invalid, Locked, trying to go to Modified";
87 SM_L, "SM^L", desc="Shared, Locked, trying to go to Modified";
88 IS_L, "IS^L", desc="Invalid, Locked, trying to go to Shared";
92 enumeration(Event, desc="Cache events") {
93 Load, desc="Load request from the processor";
94 Ifetch, desc="I-fetch request from the processor";
95 Store, desc="Store request from the processor";
96 Atomic, desc="Atomic request from the processor";
97 L1_Replacement, desc="L1 Replacement";
100 Data_Shared, desc="Received a data message, we are now a sharer";
101 Data_Owner, desc="Received a data message, we are now the owner";
102 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
103 Ack, desc="Received an ack message";
104 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
107 Transient_GETX, desc="A GetX from another processor";
108 Transient_Local_GETX, desc="A GetX from another local (on-chip) processor";
109 Transient_GETS, desc="A GetS from another processor";
110 Transient_Local_GETS, desc="A GetS from another local (on-chip) processor";
111 Transient_GETS_Last_Token, desc="A GetS from another processor; we hold the last token";
112 Transient_Local_GETS_Last_Token, desc="A GetS from another local (on-chip) processor; we hold the last token";
114 // Lock/Unlock for distributed
115 Persistent_GETX, desc="Another processor has priority to read/write";
116 Persistent_GETS, desc="Another processor has priority to read";
117 Persistent_GETS_Last_Token, desc="Another processor has priority to read, no more tokens";
118 Own_Lock_or_Unlock, desc="This processor now has priority";
121 Request_Timeout, desc="The reissue timer expired; reissue the request";
122 Use_TimeoutStarverX, desc="The use timer expired and another processor has a persistent GETX outstanding";
123 Use_TimeoutStarverS, desc="The use timer expired and another processor has a persistent GETS outstanding";
124 Use_TimeoutNoStarvers, desc="The use timer expired and no other processor is starving";
125 Use_TimeoutNoStarvers_NoMig, desc="The use timer expired, no starvers; atomic access, so don't migrate";
131 structure(Entry, desc="...", interface="AbstractCacheEntry") {
132 State CacheState, desc="cache state";
133 bool Dirty, desc="Is the data dirty (different from memory)?";
134 int Tokens, desc="The number of tokens we're holding for the line";
135 DataBlock DataBlk, desc="data for the block";
140 structure(TBE, desc="...") {
141 Address Address, desc="Physical address for this TBE";
142 State TBEState, desc="Transient state";
143 int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
144 Address PC, desc="Program counter of request";
146 bool WentPersistent, default="false", desc="Request went persistent";
147 bool ExternalResponse, default="false", desc="Response came from an external controller";
148 bool IsAtomic, default="false", desc="Request was an atomic request";
150 AccessType AccessType, desc="Type of request (used for profiling)";
151 Time IssueTime, desc="Time the request was issued";
152 AccessModeType AccessMode, desc="user/supervisor access type";
153 PrefetchBit Prefetch, desc="Is this a prefetch request";
156 external_type(TBETable) {
158 void allocate(Address);
159 void deallocate(Address);
160 bool isPresent(Address);
163 external_type(PersistentTable) {
164 void persistentRequestLock(Address, MachineID, AccessType);
165 void persistentRequestUnlock(Address, MachineID);
166 bool okToIssueStarving(Address, MachineID);
167 MachineID findSmallest(Address);
168 AccessType typeOfSmallest(Address);
169 void markEntries(Address);
170 bool isLocked(Address);
171 int countStarvingForAddress(Address);
172 int countReadStarvingForAddress(Address);
175 void set_cache_entry(AbstractCacheEntry b);
176 void unset_cache_entry();
180 TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
182 MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
184 bool starving, default="false";
185 int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
187 PersistentTable persistentTable;
188 TimerTable useTimerTable;
189 TimerTable reissueTimerTable;
191 int outstandingRequests, default="0";
192 int outstandingPersistentRequests, default="0";
194 int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculating the estimated average
195 int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_ptr))";
197 int averageLatencyEstimate() {
198 DPRINTF(RubySlicc, "%d\n",
199 (averageLatencyCounter >> averageLatencyHysteresis));
200 //profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
201 return averageLatencyCounter >> averageLatencyHysteresis;
204 void updateAverageLatencyEstimate(int latency) {
205 DPRINTF(RubySlicc, "%d\n", latency);
206 assert(latency >= 0);
208 // By subtracting the current average and then adding the most
209 // recent sample, we calculate an estimate of the recent average.
210 // If we simply used a running sum and divided by the total number
211 // of entries, the estimate of the average would adapt very slowly
212 // after the execution has run for a long time.
213 // averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
215 averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
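// Worked example with the default hysteresis of 8: the counter starts at
// 500 << 8 = 128000, so the initial estimate is 500 cycles. A new sample of
// 100 cycles makes the counter 128000 - 500 + 100 = 127600 and the estimate
// 127600 >> 8 = 498; each sample moves the estimate by roughly 1/256 of its
// distance from the current estimate.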
218 Entry getCacheEntry(Address addr), return_by_pointer="yes" {
219 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
220 if(is_valid(L1Dcache_entry)) {
221 return L1Dcache_entry;
224 Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
225 return L1Icache_entry;
228 Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
229 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
230 return L1Dcache_entry;
233 Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
234 Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
235 return L1Icache_entry;
238 int getTokens(Entry cache_entry) {
239 if (is_valid(cache_entry)) {
240 return cache_entry.Tokens;
245 State getState(TBE tbe, Entry cache_entry, Address addr) {
249 } else if (is_valid(cache_entry)) {
250 return cache_entry.CacheState;
252 if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
253 // Not in cache, in persistent table, but this processor isn't highest priority
261 void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
262 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
265 assert(state != State:I);
266 assert(state != State:S);
267 assert(state != State:O);
268 assert(state != State:MM);
269 assert(state != State:M);
270 tbe.TBEState := state;
273 if (is_valid(cache_entry)) {
274 // Make sure the token count is in range
275 assert(cache_entry.Tokens >= 0);
276 assert(cache_entry.Tokens <= max_tokens());
277 assert(cache_entry.Tokens != (max_tokens() / 2));
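// Note on the max_tokens()/2 comparisons used throughout this file: the
// owner token appears to be folded into the token count with a weight of
// max_tokens()/2, so a count strictly greater than max_tokens()/2 means this
// cache holds the owner token, and a count of exactly max_tokens()/2 is
// unreachable (hence the assert above). This reading is inferred from the
// checks below (e.g. "all tokens" == max_tokens(), owner's last token ==
// (max_tokens()/2) + 1).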
279 if ((state == State:I_L) ||
280 (state == State:IM_L) ||
281 (state == State:IS_L)) {
282 // Make sure we have no tokens in the "Invalid, locked" states
283 assert(cache_entry.Tokens == 0);
285 // Make sure the line is locked
286 // assert(persistentTable.isLocked(addr));
288 // But we shouldn't have highest priority for it
289 // assert(persistentTable.findSmallest(addr) != id);
291 } else if ((state == State:S_L) ||
292 (state == State:SM_L)) {
293 assert(cache_entry.Tokens >= 1);
294 assert(cache_entry.Tokens < (max_tokens() / 2));
296 // Make sure the line is locked...
297 // assert(persistentTable.isLocked(addr));
299 // ...But we shouldn't have highest priority for it...
300 // assert(persistentTable.findSmallest(addr) != id);
302 // ...And it must be a GETS request
303 // assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);
307 // If there is an entry in the persistent table for this block,
308 // this processor needs to have an entry in the table for this
309 // block, and that entry better be the smallest (highest
310 // priority). Otherwise, the state should have been one of the locked states.
313 //if (persistentTable.isLocked(addr)) {
314 // assert(persistentTable.findSmallest(addr) == id);
318 // in M-like states (M, MM, M_W, MM_W) you have all the tokens
319 if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
320 assert(cache_entry.Tokens == max_tokens());
323 // in NP you have no tokens
324 if (state == State:NP) {
325 assert(cache_entry.Tokens == 0);
328 // You have at least one token in S-like states
329 if (state == State:S || state == State:SM) {
330 assert(cache_entry.Tokens > 0);
333 // You have more than half the tokens in O-like states
334 if (state == State:O || state == State:OM) {
335 assert(cache_entry.Tokens > (max_tokens() / 2));
338 cache_entry.CacheState := state;
341 if (state == State:MM ||
342 state == State:MM_W) {
343 cache_entry.changePermission(AccessPermission:Read_Write);
344 } else if ((state == State:S) ||
345 (state == State:O) ||
346 (state == State:M) ||
347 (state == State:M_W) ||
348 (state == State:SM) ||
349 (state == State:S_L) ||
350 (state == State:SM_L) ||
351 (state == State:OM)) {
352 cache_entry.changePermission(AccessPermission:Read_Only);
354 cache_entry.changePermission(AccessPermission:Invalid);
359 Event mandatory_request_type_to_event(CacheRequestType type) {
360 if (type == CacheRequestType:LD) {
362 } else if (type == CacheRequestType:IFETCH) {
364 } else if (type == CacheRequestType:ST) {
366 } else if (type == CacheRequestType:ATOMIC) {
373 error("Invalid CacheRequestType");
377 AccessType cache_request_type_to_access_type(CacheRequestType type) {
378 if ((type == CacheRequestType:LD) || (type == CacheRequestType:IFETCH)) {
379 return AccessType:Read;
380 } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
381 return AccessType:Write;
383 error("Invalid CacheRequestType");
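// Classifies where a non-local hit was serviced from, for profiling. The
// "_wCC" variants appear to mark fills that involved a cache-to-cache
// transfer (another L1, or an L2 bank other than the one this address maps
// to); direct local hits never reach this function (see the note below).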
387 GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
388 if (machineIDToMachineType(sender) == MachineType:L1Cache) {
390 // NOTE direct local hits should not call this
392 return GenericMachineType:L1Cache_wCC;
393 } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
395 if (sender == (mapAddressToRange(addr,
398 l2_select_num_bits))) {
400 return GenericMachineType:L2Cache;
402 return GenericMachineType:L2Cache_wCC;
405 return ConvertMachToGenericMach(machineIDToMachineType(sender));
409 bool okToIssueStarving(Address addr, MachineID machineID) {
410 return persistentTable.okToIssueStarving(addr, machineID);
413 void markPersistentEntries(Address addr) {
414 persistentTable.markEntries(addr);
417 void setExternalResponse(TBE tbe) {
418 assert(is_valid(tbe));
419 tbe.ExternalResponse := true;
422 bool IsAtomic(TBE tbe) {
423 assert(is_valid(tbe));
428 out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
429 out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
430 out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
431 out_port(requestRecycle_out, RequestMsg, requestToL1Cache);
436 in_port(useTimerTable_in, Address, useTimerTable, rank=5) {
437 if (useTimerTable_in.isReady()) {
438 TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
440 if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
441 (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
442 if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
443 trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
444 getCacheEntry(useTimerTable.readyAddress()), tbe);
446 trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
447 getCacheEntry(useTimerTable.readyAddress()), tbe);
450 if (no_mig_atomic && IsAtomic(tbe)) {
451 trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
452 getCacheEntry(useTimerTable.readyAddress()), tbe);
454 trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
455 getCacheEntry(useTimerTable.readyAddress()), tbe);
462 in_port(reissueTimerTable_in, Address, reissueTimerTable, rank=4) {
463 if (reissueTimerTable_in.isReady()) {
464 trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
465 getCacheEntry(reissueTimerTable.readyAddress()),
466 L1_TBEs[reissueTimerTable.readyAddress()]);
470 // Persistent Network
471 in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
472 if (persistentNetwork_in.isReady()) {
473 peek(persistentNetwork_in, PersistentMsg, block_on="Address") {
474 assert(in_msg.Destination.isElement(machineID));
476 // Apply the lockdown or unlockdown message to the table
477 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
478 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
479 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
480 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
481 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
482 persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
484 error("Unexpected message");
487 // React to the message based on the current state of the table
488 Entry cache_entry := getCacheEntry(in_msg.Address);
489 TBE tbe := L1_TBEs[in_msg.Address];
491 if (persistentTable.isLocked(in_msg.Address)) {
492 if (persistentTable.findSmallest(in_msg.Address) == machineID) {
493 // Our Own Lock - this processor is highest priority
494 trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
497 if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
498 if (getTokens(cache_entry) == 1 ||
499 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
500 trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
503 trigger(Event:Persistent_GETS, in_msg.Address,
507 trigger(Event:Persistent_GETX, in_msg.Address,
512 // Unlock case - no entries in the table
513 trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
521 in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
522 if (responseNetwork_in.isReady()) {
523 peek(responseNetwork_in, ResponseMsg, block_on="Address") {
524 assert(in_msg.Destination.isElement(machineID));
526 Entry cache_entry := getCacheEntry(in_msg.Address);
527 TBE tbe := L1_TBEs[in_msg.Address];
529 // Mark TBE flag if response received off-chip. Use this to update average latency estimate
530 if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
532 if (in_msg.Sender == mapAddressToRange(in_msg.Address,
535 l2_select_num_bits)) {
537 // came from an off-chip L2 cache
539 // L1_TBEs[in_msg.Address].ExternalResponse := true;
540 // profile_offchipL2_response(in_msg.Address);
544 // profile_onchipL2_response(in_msg.Address );
546 } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:Directory ) {
548 setExternalResponse(tbe);
549 // profile_memory_response( in_msg.Address);
551 } else if ( machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
552 //if (isLocalProcessor(machineID, in_msg.Sender) == false) {
553 //if (is_valid(tbe)) {
554 // tbe.ExternalResponse := true;
555 // profile_offchipL1_response(in_msg.Address );
559 // profile_onchipL1_response(in_msg.Address );
562 error("unexpected SenderMachine");
566 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
567 if (in_msg.Type == CoherenceResponseType:ACK) {
568 assert(in_msg.Tokens < (max_tokens() / 2));
569 trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
570 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
571 trigger(Event:Data_Owner, in_msg.Address, cache_entry, tbe);
572 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
573 assert(in_msg.Tokens < (max_tokens() / 2));
574 trigger(Event:Data_Shared, in_msg.Address, cache_entry, tbe);
576 error("Unexpected message");
579 if (in_msg.Type == CoherenceResponseType:ACK) {
580 assert(in_msg.Tokens < (max_tokens() / 2));
581 trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry, tbe);
582 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
583 trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry, tbe);
585 error("Unexpected message");
593 in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
594 if (requestNetwork_in.isReady()) {
595 peek(requestNetwork_in, RequestMsg, block_on="Address") {
596 assert(in_msg.Destination.isElement(machineID));
598 Entry cache_entry := getCacheEntry(in_msg.Address);
599 TBE tbe := L1_TBEs[in_msg.Address];
601 if (in_msg.Type == CoherenceRequestType:GETX) {
602 if (in_msg.isLocal) {
603 trigger(Event:Transient_Local_GETX, in_msg.Address,
607 trigger(Event:Transient_GETX, in_msg.Address,
610 } else if (in_msg.Type == CoherenceRequestType:GETS) {
611 if (getTokens(cache_entry) == 1 ||
612 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
613 if (in_msg.isLocal) {
614 trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address,
618 trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
623 if (in_msg.isLocal) {
624 trigger(Event:Transient_Local_GETS, in_msg.Address,
628 trigger(Event:Transient_GETS, in_msg.Address,
633 error("Unexpected message");
640 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
641 if (mandatoryQueue_in.isReady()) {
642 peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
643 // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
645 TBE tbe := L1_TBEs[in_msg.LineAddress];
647 if (in_msg.Type == CacheRequestType:IFETCH) {
648 // ** INSTRUCTION ACCESS ***
650 // Check to see if it is in the OTHER L1
651 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
652 if (is_valid(L1Dcache_entry)) {
653 // The block is in the wrong L1, try to write it to the L2
654 trigger(Event:L1_Replacement, in_msg.LineAddress,
655 L1Dcache_entry, tbe);
658 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
659 if (is_valid(L1Icache_entry)) {
660 // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
661 trigger(mandatory_request_type_to_event(in_msg.Type),
662 in_msg.LineAddress, L1Icache_entry, tbe);
664 if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
665 // L1 doesn't have the line, but we have space for it in the L1
666 trigger(mandatory_request_type_to_event(in_msg.Type),
667 in_msg.LineAddress, L1Icache_entry, tbe);
669 // No room in the L1, so we need to make room
670 trigger(Event:L1_Replacement,
671 L1IcacheMemory.cacheProbe(in_msg.LineAddress),
672 getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
673 L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
677 // *** DATA ACCESS ***
679 // Check to see if it is in the OTHER L1
680 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
682 if (is_valid(L1Icache_entry)) {
683 // The block is in the wrong L1, try to write it to the L2
684 trigger(Event:L1_Replacement, in_msg.LineAddress,
685 L1Icache_entry, tbe);
688 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
689 if (is_valid(L1Dcache_entry)) {
690 // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
691 trigger(mandatory_request_type_to_event(in_msg.Type),
692 in_msg.LineAddress, L1Dcache_entry, tbe);
694 if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
695 // L1 doesn't have the line, but we have space for it in the L1
696 trigger(mandatory_request_type_to_event(in_msg.Type),
697 in_msg.LineAddress, L1Dcache_entry, tbe);
699 // No room in the L1, so we need to make room
700 trigger(Event:L1_Replacement,
701 L1DcacheMemory.cacheProbe(in_msg.LineAddress),
702 getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
703 L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
713 action(a_issueReadRequest, "a", desc="Issue GETS") {
714 assert(is_valid(tbe));
715 if (tbe.IssueCount == 0) {
716 // Update outstanding requests
717 //profile_outstanding_request(outstandingRequests);
718 outstandingRequests := outstandingRequests + 1;
721 if (tbe.IssueCount >= retry_threshold) {
722 // Issue a persistent request if possible
723 if (okToIssueStarving(address, machineID) && (starving == false)) {
724 enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
725 out_msg.Address := address;
726 out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
727 out_msg.Requestor := machineID;
728 out_msg.Destination.broadcast(MachineType:L1Cache);
731 // Currently the configuration system limits the system to only one
732 // chip. Therefore, if we assume one shared L2 cache, then only one
733 // pertinent L2 cache exists.
735 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
737 out_msg.Destination.add(mapAddressToRange(address,
740 l2_select_num_bits));
742 out_msg.Destination.add(map_Address_to_Directory(address));
743 out_msg.MessageSize := MessageSizeType:Persistent_Control;
744 out_msg.Prefetch := tbe.Prefetch;
745 out_msg.AccessMode := tbe.AccessMode;
747 markPersistentEntries(address);
750 if (tbe.IssueCount == 0) {
751 //profile_persistent_prediction(address, tbe.AccessType);
754 // Update outstanding requests
755 //profile_outstanding_persistent_request(outstandingPersistentRequests);
756 outstandingPersistentRequests := outstandingPersistentRequests + 1;
758 // Increment IssueCount
759 tbe.IssueCount := tbe.IssueCount + 1;
761 tbe.WentPersistent := true;
763 // Do not schedule a wakeup; a persistent request will always complete
767 // We'd like to issue a persistent request, but are not allowed
768 // to issue a P.R. right now. Thus, we do not increment the IssueCount.
771 // Set a wakeup timer
772 reissueTimerTable.set(address, 10);
776 // Make a normal request
777 enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
778 out_msg.Address := address;
779 out_msg.Type := CoherenceRequestType:GETS;
780 out_msg.Requestor := machineID;
781 out_msg.Destination.add(mapAddressToRange(address,
784 l2_select_num_bits));
786 out_msg.RetryNum := tbe.IssueCount;
787 if (tbe.IssueCount == 0) {
788 out_msg.MessageSize := MessageSizeType:Request_Control;
790 out_msg.MessageSize := MessageSizeType:Reissue_Control;
792 out_msg.Prefetch := tbe.Prefetch;
793 out_msg.AccessMode := tbe.AccessMode;
796 // send to other local L1s, with local bit set
797 enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
798 out_msg.Address := address;
799 out_msg.Type := CoherenceRequestType:GETS;
800 out_msg.Requestor := machineID;
802 // Since only one chip, assuming all L1 caches are local
804 //out_msg.Destination := getOtherLocalL1IDs(machineID);
805 out_msg.Destination.broadcast(MachineType:L1Cache);
806 out_msg.Destination.remove(machineID);
808 out_msg.RetryNum := tbe.IssueCount;
809 out_msg.isLocal := true;
810 if (tbe.IssueCount == 0) {
811 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
813 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
815 out_msg.Prefetch := tbe.Prefetch;
816 out_msg.AccessMode := tbe.AccessMode;
819 // Increment IssueCount
820 tbe.IssueCount := tbe.IssueCount + 1;
822 // Set a wakeup timer
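// With dynamic timeouts enabled, the request is reissued after 1.25x the
// current average miss-latency estimate; otherwise after
// fixed_timeout_latency cycles (100 by default).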
824 if (dynamic_timeout_enabled) {
825 reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
827 reissueTimerTable.set(address, fixed_timeout_latency);
833 action(b_issueWriteRequest, "b", desc="Issue GETX") {
835 assert(is_valid(tbe));
836 if (tbe.IssueCount == 0) {
837 // Update outstanding requests
838 //profile_outstanding_request(outstandingRequests);
839 outstandingRequests := outstandingRequests + 1;
842 if (tbe.IssueCount >= retry_threshold) {
843 // Issue a persistent request if possible
844 if ( okToIssueStarving(address, machineID) && (starving == false)) {
845 enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
846 out_msg.Address := address;
847 out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
848 out_msg.Requestor := machineID;
849 out_msg.Destination.broadcast(MachineType:L1Cache);
852 // Currently the configuration system limits the system to only one
853 // chip. Therefore, if we assume one shared L2 cache, then only one
854 // pertinent L2 cache exists.
856 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
858 out_msg.Destination.add(mapAddressToRange(address,
861 l2_select_num_bits));
863 out_msg.Destination.add(map_Address_to_Directory(address));
864 out_msg.MessageSize := MessageSizeType:Persistent_Control;
865 out_msg.Prefetch := tbe.Prefetch;
866 out_msg.AccessMode := tbe.AccessMode;
868 markPersistentEntries(address);
871 // Update outstanding requests
872 //profile_outstanding_persistent_request(outstandingPersistentRequests);
873 outstandingPersistentRequests := outstandingPersistentRequests + 1;
875 if (tbe.IssueCount == 0) {
876 //profile_persistent_prediction(address, tbe.AccessType);
879 // Increment IssueCount
880 tbe.IssueCount := tbe.IssueCount + 1;
882 tbe.WentPersistent := true;
884 // Do not schedule a wakeup; a persistent request will always complete
888 // We'd like to issue a persistent request, but are not allowed
889 // to issue a P.R. right now. Thus, we do not increment the IssueCount.
892 // Set a wakeup timer
893 reissueTimerTable.set(address, 10);
898 // Make a normal request
899 enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
900 out_msg.Address := address;
901 out_msg.Type := CoherenceRequestType:GETX;
902 out_msg.Requestor := machineID;
904 out_msg.Destination.add(mapAddressToRange(address,
907 l2_select_num_bits));
909 out_msg.RetryNum := tbe.IssueCount;
911 if (tbe.IssueCount == 0) {
912 out_msg.MessageSize := MessageSizeType:Request_Control;
914 out_msg.MessageSize := MessageSizeType:Reissue_Control;
916 out_msg.Prefetch := tbe.Prefetch;
917 out_msg.AccessMode := tbe.AccessMode;
920 // send to other local L1s too
921 enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
922 out_msg.Address := address;
923 out_msg.Type := CoherenceRequestType:GETX;
924 out_msg.Requestor := machineID;
925 out_msg.isLocal := true;
928 // Since only one chip, assuming all L1 caches are local
930 //out_msg.Destination := getOtherLocalL1IDs(machineID);
931 out_msg.Destination.broadcast(MachineType:L1Cache);
932 out_msg.Destination.remove(machineID);
934 out_msg.RetryNum := tbe.IssueCount;
935 if (tbe.IssueCount == 0) {
936 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
938 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
940 out_msg.Prefetch := tbe.Prefetch;
941 out_msg.AccessMode := tbe.AccessMode;
944 // Increment IssueCount
945 tbe.IssueCount := tbe.IssueCount + 1;
947 DPRINTF(RubySlicc, "incremented issue count to %d\n",
950 // Set a wakeup timer
951 if (dynamic_timeout_enabled) {
952 reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
954 reissueTimerTable.set(address, fixed_timeout_latency);
959 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
960 peek(responseNetwork_in, ResponseMsg) {
961 // FIXME, should use a 3rd vnet
962 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
963 out_msg.Address := address;
964 out_msg.Type := in_msg.Type;
965 out_msg.Sender := machineID;
966 out_msg.Destination.add(map_Address_to_Directory(address));
967 out_msg.Tokens := in_msg.Tokens;
968 out_msg.MessageSize := in_msg.MessageSize;
969 out_msg.DataBlk := in_msg.DataBlk;
970 out_msg.Dirty := in_msg.Dirty;
975 action(c_ownedReplacement, "c", desc="Issue writeback") {
976 assert(is_valid(cache_entry));
977 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
978 out_msg.Address := address;
979 out_msg.Sender := machineID;
981 out_msg.Destination.add(mapAddressToRange(address,
984 l2_select_num_bits));
986 out_msg.Tokens := cache_entry.Tokens;
987 out_msg.DataBlk := cache_entry.DataBlk;
988 out_msg.Dirty := cache_entry.Dirty;
989 out_msg.Type := CoherenceResponseType:WB_OWNED;
991 // always send the data?
992 out_msg.MessageSize := MessageSizeType:Writeback_Data;
994 cache_entry.Tokens := 0;
997 action(cc_sharedReplacement, "\c", desc="Issue shared writeback") {
999 // don't send writeback if replacing block with no tokens
1000 assert(is_valid(cache_entry));
1001 assert (cache_entry.Tokens > 0);
1002 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1003 out_msg.Address := address;
1004 out_msg.Sender := machineID;
1006 out_msg.Destination.add(mapAddressToRange(address,
1007 MachineType:L2Cache,
1009 l2_select_num_bits));
1011 out_msg.Tokens := cache_entry.Tokens;
1012 out_msg.DataBlk := cache_entry.DataBlk;
1013 // assert(cache_entry.Dirty == false);
1014 out_msg.Dirty := false;
1016 out_msg.MessageSize := MessageSizeType:Writeback_Data;
1017 out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
1019 cache_entry.Tokens := 0;
1022 action(tr_tokenReplacement, "tr", desc="Issue token writeback") {
1023 assert(is_valid(cache_entry));
1024 if (cache_entry.Tokens > 0) {
1025 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1026 out_msg.Address := address;
1027 out_msg.Sender := machineID;
1029 out_msg.Destination.add(mapAddressToRange(address,
1030 MachineType:L2Cache,
1032 l2_select_num_bits));
1034 out_msg.Tokens := cache_entry.Tokens;
1035 out_msg.DataBlk := cache_entry.DataBlk;
1036 // assert(cache_entry.Dirty == false);
1037 out_msg.Dirty := false;
1039 // always send the data?
1040 out_msg.MessageSize := MessageSizeType:Writeback_Control;
1041 out_msg.Type := CoherenceResponseType:WB_TOKENS;
1044 cache_entry.Tokens := 0;
1048 action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
1049 assert(is_valid(cache_entry));
1050 peek(requestNetwork_in, RequestMsg) {
1051 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1052 out_msg.Address := address;
1053 out_msg.Type := CoherenceResponseType:DATA_SHARED;
1054 out_msg.Sender := machineID;
1055 out_msg.Destination.add(in_msg.Requestor);
1056 out_msg.Tokens := 1;
1057 out_msg.DataBlk := cache_entry.DataBlk;
1058 // out_msg.Dirty := cache_entry.Dirty;
1059 out_msg.Dirty := false;
1060 if (in_msg.isLocal) {
1061 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
1063 out_msg.MessageSize := MessageSizeType:Response_Data;
1067 cache_entry.Tokens := cache_entry.Tokens - 1;
1068 assert(cache_entry.Tokens >= 1);
1071 action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and up to N tokens from cache to requestor") {
1072 assert(is_valid(cache_entry));
1073 peek(requestNetwork_in, RequestMsg) {
1074 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
1075 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1076 out_msg.Address := address;
1077 out_msg.Type := CoherenceResponseType:DATA_SHARED;
1078 out_msg.Sender := machineID;
1079 out_msg.Destination.add(in_msg.Requestor);
1080 out_msg.Tokens := N_tokens;
1081 out_msg.DataBlk := cache_entry.DataBlk;
1082 // out_msg.Dirty := cache_entry.Dirty;
1083 out_msg.Dirty := false;
1084 if (in_msg.isLocal) {
1085 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
1087 out_msg.MessageSize := MessageSizeType:Response_Data;
1090 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
1092 else if (cache_entry.Tokens > 1) {
1093 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1094 out_msg.Address := address;
1095 out_msg.Type := CoherenceResponseType:DATA_SHARED;
1096 out_msg.Sender := machineID;
1097 out_msg.Destination.add(in_msg.Requestor);
1098 out_msg.Tokens := 1;
1099 out_msg.DataBlk := cache_entry.DataBlk;
1100 // out_msg.Dirty := cache_entry.Dirty;
1101 out_msg.Dirty := false;
1102 if (in_msg.isLocal) {
1103 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
1105 out_msg.MessageSize := MessageSizeType:Response_Data;
1108 cache_entry.Tokens := cache_entry.Tokens - 1;
1111 // assert(cache_entry.Tokens >= 1);
1114 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
1115 peek(requestNetwork_in, RequestMsg) {
1116 assert(is_valid(cache_entry));
1117 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1118 out_msg.Address := address;
1119 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1120 out_msg.Sender := machineID;
1121 out_msg.Destination.add(in_msg.Requestor);
1122 assert(cache_entry.Tokens > (max_tokens() / 2));
1123 out_msg.Tokens := cache_entry.Tokens;
1124 out_msg.DataBlk := cache_entry.DataBlk;
1125 out_msg.Dirty := cache_entry.Dirty;
1126 if (in_msg.isLocal) {
1127 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
1129 out_msg.MessageSize := MessageSizeType:Response_Data;
1133 cache_entry.Tokens := 0;
1136 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
1137 // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1138 assert(is_valid(cache_entry));
1139 if (cache_entry.Tokens > 0) {
1140 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1141 out_msg.Address := address;
1142 if (cache_entry.Tokens > (max_tokens() / 2)) {
1143 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1145 out_msg.Type := CoherenceResponseType:ACK;
1147 out_msg.Sender := machineID;
1148 out_msg.Destination.add(persistentTable.findSmallest(address));
1149 assert(cache_entry.Tokens >= 1);
1150 out_msg.Tokens := cache_entry.Tokens;
1151 out_msg.DataBlk := cache_entry.DataBlk;
1152 out_msg.MessageSize := MessageSizeType:Response_Control;
1155 cache_entry.Tokens := 0;
1158 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
1159 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1160 assert(is_valid(cache_entry));
1161 assert(cache_entry.Tokens > 0);
1162 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1163 out_msg.Address := address;
1164 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1165 out_msg.Sender := machineID;
1166 out_msg.Destination.add(persistentTable.findSmallest(address));
1167 assert(cache_entry.Tokens > (max_tokens() / 2));
1168 out_msg.Tokens := cache_entry.Tokens;
1169 out_msg.DataBlk := cache_entry.DataBlk;
1170 out_msg.Dirty := cache_entry.Dirty;
1171 out_msg.MessageSize := MessageSizeType:Response_Data;
1173 cache_entry.Tokens := 0;
1176 action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
1177 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1178 assert(is_valid(cache_entry));
1179 assert(cache_entry.Tokens > 0);
1180 if (cache_entry.Tokens > 1) {
1181 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1182 out_msg.Address := address;
1183 if (cache_entry.Tokens > (max_tokens() / 2)) {
1184 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1186 out_msg.Type := CoherenceResponseType:ACK;
1188 out_msg.Sender := machineID;
1189 out_msg.Destination.add(persistentTable.findSmallest(address));
1190 assert(cache_entry.Tokens >= 1);
1191 if (cache_entry.Tokens > N_tokens) {
1192 out_msg.Tokens := cache_entry.Tokens - N_tokens;
1194 out_msg.Tokens := cache_entry.Tokens - 1;
1196 out_msg.DataBlk := cache_entry.DataBlk;
1197 out_msg.MessageSize := MessageSizeType:Response_Control;
1200 if (cache_entry.Tokens > N_tokens) {
1201 cache_entry.Tokens := N_tokens;
1203 cache_entry.Tokens := 1;
1207 action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
1208 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1209 assert(is_valid(cache_entry));
1210 assert(cache_entry.Tokens > ((max_tokens() / 2) + 1));
1211 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1212 out_msg.Address := address;
1213 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1214 out_msg.Sender := machineID;
1215 out_msg.Destination.add(persistentTable.findSmallest(address));
1216 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
1217 out_msg.Tokens := cache_entry.Tokens - N_tokens;
1219 out_msg.Tokens := cache_entry.Tokens - 1;
1221 assert(out_msg.Tokens > (max_tokens() / 2));
1222 out_msg.DataBlk := cache_entry.DataBlk;
1223 out_msg.Dirty := cache_entry.Dirty;
1224 out_msg.MessageSize := MessageSizeType:Response_Data;
1226 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
1227 cache_entry.Tokens := N_tokens;
1229 cache_entry.Tokens := 1;
1233 action(fo_sendDataWithOwnerToken, "fo", desc="Send data and the owner token to starver") {
1234 assert(is_valid(cache_entry));
1235 assert(cache_entry.Tokens == ((max_tokens() / 2) + 1));
1236 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1237 out_msg.Address := address;
1238 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1239 out_msg.Sender := machineID;
1240 out_msg.Destination.add(persistentTable.findSmallest(address));
1241 out_msg.Tokens := cache_entry.Tokens;
1242 assert(out_msg.Tokens > (max_tokens() / 2));
1243 out_msg.DataBlk := cache_entry.DataBlk;
1244 out_msg.Dirty := cache_entry.Dirty;
1245 out_msg.MessageSize := MessageSizeType:Response_Data;
1247 cache_entry.Tokens := 0;
1250 action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
1251 // assert(persistentTable.isLocked(address));
1253 peek(responseNetwork_in, ResponseMsg) {
1254 // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1255 // FIXME, should use a 3rd vnet in some cases
1256 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
1257 out_msg.Address := address;
1258 out_msg.Type := in_msg.Type;
1259 out_msg.Sender := machineID;
1260 out_msg.Destination.add(persistentTable.findSmallest(address));
1261 out_msg.Tokens := in_msg.Tokens;
1262 out_msg.DataBlk := in_msg.DataBlk;
1263 out_msg.Dirty := in_msg.Dirty;
1264 out_msg.MessageSize := in_msg.MessageSize;
1270 action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
1271 assert(is_valid(cache_entry));
1272 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1273 address, cache_entry.DataBlk);
1275 sequencer.readCallback(address,
1276 GenericMachineType:L1Cache,
1277 cache_entry.DataBlk);
1281 action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
1282 assert(is_valid(cache_entry));
1283 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1284 address, cache_entry.DataBlk);
1285 peek(responseNetwork_in, ResponseMsg) {
1287 sequencer.readCallback(address,
1288 getNondirectHitMachType(address, in_msg.Sender),
1289 cache_entry.DataBlk);
1294 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
1295 assert(is_valid(cache_entry));
1296 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1297 address, cache_entry.DataBlk);
1299 sequencer.writeCallback(address,
1300 GenericMachineType:L1Cache,
1301 cache_entry.DataBlk);
1303 cache_entry.Dirty := true;
1304 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
1307 action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
1308 assert(is_valid(cache_entry));
1309 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1310 address, cache_entry.DataBlk);
1311 peek(responseNetwork_in, ResponseMsg) {
1313 sequencer.writeCallback(address,
1314 getNondirectHitMachType(address, in_msg.Sender),
1315 cache_entry.DataBlk);
1318 cache_entry.Dirty := true;
1319 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
1322 action(i_allocateTBE, "i", desc="Allocate TBE") {
1323 check_allocate(L1_TBEs);
1324 L1_TBEs.allocate(address);
1325 set_tbe(L1_TBEs[address]);
1326 tbe.IssueCount := 0;
1327 peek(mandatoryQueue_in, CacheMsg) {
1328 tbe.PC := in_msg.ProgramCounter;
1329 tbe.AccessType := cache_request_type_to_access_type(in_msg.Type);
1330 if (in_msg.Type == CacheRequestType:ATOMIC) {
1331 tbe.IsAtomic := true;
1333 tbe.Prefetch := in_msg.Prefetch;
1334 tbe.AccessMode := in_msg.AccessMode;
1336 tbe.IssueTime := get_time();
1339 action(ta_traceStalledAddress, "ta", desc="Trace Stalled Address") {
1340 peek(mandatoryQueue_in, CacheMsg) {
1341 APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
1345 action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
1346 if (reissueTimerTable.isSet(address)) {
1347 reissueTimerTable.unset(address);
1351 action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
1352 useTimerTable.unset(address);
1355 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
1356 mandatoryQueue_in.dequeue();
1359 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
1360 persistentNetwork_in.dequeue();
1363 action(m_popRequestQueue, "m", desc="Pop request queue.") {
1364 requestNetwork_in.dequeue();
1367 action(n_popResponseQueue, "n", desc="Pop response queue") {
1368 responseNetwork_in.dequeue();
1371 action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
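// 50 cycles is the "lockout period" referenced in the transitions below:
// while in M_W/MM_W this cache ignores external requests so the processor
// can complete its own access before tokens are passed on to any starver.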
1372 useTimerTable.set(address, 50);
1375 action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
1376 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1377 out_msg.Address := address;
1378 out_msg.Type := CoherenceResponseType:INV;
1379 out_msg.Tokens := 0;
1380 out_msg.Sender := machineID;
1382 out_msg.Destination.add(mapAddressToRange(address,
1383 MachineType:L2Cache,
1385 l2_select_num_bits));
1387 out_msg.MessageSize := MessageSizeType:Response_Control;
1392 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
1393 peek(responseNetwork_in, ResponseMsg) {
1394 assert(is_valid(cache_entry));
1395 assert(in_msg.Tokens != 0);
1396 DPRINTF(RubySlicc, "L1 received tokens for address: %s, tokens: %d\n",
1397 in_msg.Address, in_msg.Tokens);
1398 cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
1399 DPRINTF(RubySlicc, "%d\n", cache_entry.Tokens);
1401 if (cache_entry.Dirty == false && in_msg.Dirty) {
1402 cache_entry.Dirty := true;
1407 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
1409 assert(is_valid(tbe));
1410 if (tbe.WentPersistent) {
1411 // assert(starving == true);
1412 outstandingRequests := outstandingRequests - 1;
1413 enqueue(persistentNetwork_out, PersistentMsg, latency = l1_request_latency) {
1414 out_msg.Address := address;
1415 out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
1416 out_msg.Requestor := machineID;
1417 out_msg.Destination.broadcast(MachineType:L1Cache);
1420 // Currently the configuration system limits the system to only one
1421 // chip. Therefore, if we assume one shared L2 cache, then only one
1422 // pertinent L2 cache exists.
1424 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
1426 out_msg.Destination.add(mapAddressToRange(address,
1427 MachineType:L2Cache,
1429 l2_select_num_bits));
1431 out_msg.Destination.add(map_Address_to_Directory(address));
1432 out_msg.MessageSize := MessageSizeType:Persistent_Control;
1437 // Update average latency
1438 if (tbe.IssueCount <= 1) {
1439 if (tbe.ExternalResponse == true) {
1440 updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(tbe.IssueTime));
1445 //if (tbe.WentPersistent) {
1446 // profile_token_retry(address, tbe.AccessType, 2);
1449 // profile_token_retry(address, tbe.AccessType, 1);
1452 //profile_token_retry(address, tbe.AccessType, tbe.IssueCount);
1453 L1_TBEs.deallocate(address);
1457 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
1458 assert(is_valid(cache_entry));
1459 if (cache_entry.Tokens > 0) {
1460 peek(requestNetwork_in, RequestMsg) {
1461 enqueue(responseNetwork_out, ResponseMsg, latency = l1_response_latency) {
1462 out_msg.Address := address;
1463 if (cache_entry.Tokens > (max_tokens() / 2)) {
1464 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1466 out_msg.Type := CoherenceResponseType:ACK;
1468 out_msg.Sender := machineID;
1469 out_msg.Destination.add(in_msg.Requestor);
1470 assert(cache_entry.Tokens >= 1);
1471 out_msg.Tokens := cache_entry.Tokens;
1472 out_msg.DataBlk := cache_entry.DataBlk;
1473 out_msg.MessageSize := MessageSizeType:Response_Control;
1477 cache_entry.Tokens := 0;
1480 action(u_writeDataToCache, "u", desc="Write data to cache") {
1481 peek(responseNetwork_in, ResponseMsg) {
1482 assert(is_valid(cache_entry));
1483 cache_entry.DataBlk := in_msg.DataBlk;
1484 if (cache_entry.Dirty == false && in_msg.Dirty) {
1485 cache_entry.Dirty := in_msg.Dirty;
1491 action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
1492 assert(getTokens(cache_entry) == 0);
1493 if (L1DcacheMemory.isTagPresent(address)) {
1494 L1DcacheMemory.deallocate(address);
1496 L1IcacheMemory.deallocate(address);
1498 unset_cache_entry();
1501 action(ii_allocateL1DCacheBlock, "\i", desc="Allocate an L1 D-cache entry for this address.") {
1502 if (is_valid(cache_entry)) {
1504 set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
1508 action(pp_allocateL1ICacheBlock, "\p", desc="Allocate an L1 I-cache entry for this address.") {
1509 if (is_valid(cache_entry)) {
1511 set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
1515 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
1516 peek(mandatoryQueue_in, CacheMsg) {
1517 if (L1DcacheMemory.isTagPresent(address)) {
1518 L1DcacheMemory.profileMiss(in_msg);
1520 L1IcacheMemory.profileMiss(in_msg);
1525 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
1526 peek(responseNetwork_in, ResponseMsg) {
1527 assert(is_valid(cache_entry));
1528 assert(cache_entry.DataBlk == in_msg.DataBlk);
1532 action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the request at the head of the mandatory queue and wait until the address is woken up.") {
1533 peek(mandatoryQueue_in, CacheMsg) {
1534 APPEND_TRANSITION_COMMENT(in_msg.LineAddress);
1536 stall_and_wait(mandatoryQueue_in, address);
1539 action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
1540 wake_up_dependents(address);
1543 action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
1544 wake_up_all_dependents();
1547 //*****************************************************
1548 // TRANSITIONS
1549 //*****************************************************
1551 // Transitions for Load/Store/L1_Replacement from transient states
1552 transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
1553 ta_traceStalledAddress;
1554 zz_stallAndWaitMandatoryQueue;
1557 transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, {Store, Atomic}) {
1558 zz_stallAndWaitMandatoryQueue;
1561 transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
1562 zz_stallAndWaitMandatoryQueue;
1567 transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
1568 l_popPersistentQueue;
1571 // Transitions from NP
1572 transition(NP, Load, IS) {
1573 ii_allocateL1DCacheBlock;
1577 k_popMandatoryQueue;
1580 transition(NP, Ifetch, IS) {
1581 pp_allocateL1ICacheBlock;
1585 k_popMandatoryQueue;
1588 transition(NP, {Store, Atomic}, IM) {
1589 ii_allocateL1DCacheBlock;
1591 b_issueWriteRequest;
1593 k_popMandatoryQueue;
1596 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
1601 transition(NP, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) {
1605 transition(NP, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
1606 l_popPersistentQueue;
1609 // Transitions from Idle
1610 transition(I, Load, IS) {
1614 k_popMandatoryQueue;
1617 transition(I, Ifetch, IS) {
1621 k_popMandatoryQueue;
1624 transition(I, {Store, Atomic}, IM) {
1626 b_issueWriteRequest;
1628 k_popMandatoryQueue;
1631 transition(I, L1_Replacement) {
1632 ta_traceStalledAddress;
1633 tr_tokenReplacement;
1634 gg_deallocateL1CacheBlock;
1635 ka_wakeUpAllDependents;
1638 transition(I, {Transient_GETX, Transient_Local_GETX}) {
1639 t_sendAckWithCollectedTokens;
1643 transition(I, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
1647 transition(I, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, I_L) {
1648 e_sendAckWithCollectedTokens;
1649 l_popPersistentQueue;
1652 transition(I_L, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}) {
1653 l_popPersistentQueue;
1656 transition(I, Ack) {
1657 q_updateTokensFromResponse;
1661 transition(I, Data_Shared, S) {
1663 q_updateTokensFromResponse;
1667 transition(I, Data_Owner, O) {
1669 q_updateTokensFromResponse;
1673 transition(I, Data_All_Tokens, M) {
1675 q_updateTokensFromResponse;
1679 // Transitions from Shared
1680 transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
1682 k_popMandatoryQueue;
1685 transition(S, {Store, Atomic}, SM) {
1687 b_issueWriteRequest;
1689 k_popMandatoryQueue;
1692 transition(S, L1_Replacement, I) {
1693 ta_traceStalledAddress;
1694 cc_sharedReplacement; // Only needed in some cases
1695 gg_deallocateL1CacheBlock;
1696 ka_wakeUpAllDependents;
1699 transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
1700 t_sendAckWithCollectedTokens;
1701 p_informL2AboutTokenLoss;
1705 // only owner responds to non-local requests
1706 transition(S, Transient_GETS) {
1710 transition(S, Transient_Local_GETS) {
1711 d_sendDataWithToken;
1715 transition(S, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
1719 transition({S, S_L}, Persistent_GETX, I_L) {
1720 e_sendAckWithCollectedTokens;
1721 p_informL2AboutTokenLoss;
1722 l_popPersistentQueue;
1725 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1726 f_sendAckWithAllButNorOneTokens;
1727 l_popPersistentQueue;
1730 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1731 l_popPersistentQueue;
1734 transition(S, Ack) {
1735 q_updateTokensFromResponse;
1739 transition(S, Data_Shared) {
1740 w_assertIncomingDataAndCacheDataMatch;
1741 q_updateTokensFromResponse;
1745 transition(S, Data_Owner, O) {
1746 w_assertIncomingDataAndCacheDataMatch;
1747 q_updateTokensFromResponse;
1751 transition(S, Data_All_Tokens, M) {
1752 w_assertIncomingDataAndCacheDataMatch;
1753 q_updateTokensFromResponse;
1757 // Transitions from Owned
1758 transition({O, OM}, {Load, Ifetch}) {
1760 k_popMandatoryQueue;
1763 transition(O, {Store, Atomic}, OM) {
1765 b_issueWriteRequest;
1767 k_popMandatoryQueue;
1770 transition(O, L1_Replacement, I) {
1771 ta_traceStalledAddress;
1773 gg_deallocateL1CacheBlock;
1774 ka_wakeUpAllDependents;
1777 transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
1778 dd_sendDataWithAllTokens;
1779 p_informL2AboutTokenLoss;
1783 transition(O, Persistent_GETX, I_L) {
1784 ee_sendDataWithAllTokens;
1785 p_informL2AboutTokenLoss;
1786 l_popPersistentQueue;
1789 transition(O, Persistent_GETS, S_L) {
1790 ff_sendDataWithAllButNorOneTokens;
1791 l_popPersistentQueue;
1794 transition(O, Persistent_GETS_Last_Token, I_L) {
1795 fo_sendDataWithOwnerToken;
1796 l_popPersistentQueue;
1799 transition(O, Transient_GETS) {
1800 d_sendDataWithToken;
1804 transition(O, Transient_Local_GETS) {
1805 d_sendDataWithToken;
1809 // ran out of tokens, wait for it to go persistent
1810 transition(O, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
1814 transition(O, Ack) {
1815 q_updateTokensFromResponse;
1819 transition(O, Ack_All_Tokens, M) {
1820 q_updateTokensFromResponse;
1824 transition(O, Data_Shared) {
1825 w_assertIncomingDataAndCacheDataMatch;
1826 q_updateTokensFromResponse;
1830 transition(O, Data_All_Tokens, M) {
1831 w_assertIncomingDataAndCacheDataMatch;
1832 q_updateTokensFromResponse;
1836 // Transitions from Modified
1837 transition({MM, MM_W}, {Load, Ifetch}) {
1839 k_popMandatoryQueue;
1842 transition({MM_W}, {Store, Atomic}) {
1844 k_popMandatoryQueue;
1847 transition(MM, Store) {
1849 k_popMandatoryQueue;
1852 transition(MM, Atomic, M) {
1854 k_popMandatoryQueue;
1857 transition(MM, L1_Replacement, I) {
1858 ta_traceStalledAddress;
1860 gg_deallocateL1CacheBlock;
1861 ka_wakeUpAllDependents;
1864 transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
1865 dd_sendDataWithAllTokens;
1866 p_informL2AboutTokenLoss;
1870 transition({MM_W}, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
1874 // Implement the migratory sharing optimization, even for persistent requests
1875 transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
1876 ee_sendDataWithAllTokens;
1877 p_informL2AboutTokenLoss;
1878 l_popPersistentQueue;
1881 // ignore persistent requests in lockout period
1882 transition(MM_W, {Persistent_GETX, Persistent_GETS}) {
1883 l_popPersistentQueue;
1886 transition(MM_W, Use_TimeoutNoStarvers, MM) {
1889 kd_wakeUpDependents;
1892 transition(MM_W, Use_TimeoutNoStarvers_NoMig, M) {
1895 kd_wakeUpDependents;
1898 // Transitions from Dirty Exclusive
1899 transition({M, M_W}, {Load, Ifetch}) {
1901 k_popMandatoryQueue;
1904 transition(M, Store, MM) {
1906 k_popMandatoryQueue;
1909 transition(M, Atomic) {
1911 k_popMandatoryQueue;
1914 transition(M_W, Store, MM_W) {
1916 k_popMandatoryQueue;
1919 transition(M_W, Atomic) {
1921 k_popMandatoryQueue;
1924 transition(M, L1_Replacement, I) {
1925 ta_traceStalledAddress;
1927 gg_deallocateL1CacheBlock;
1928 ka_wakeUpAllDependents;
1931 transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
1932 dd_sendDataWithAllTokens;
1933 p_informL2AboutTokenLoss;
1937 transition(M, Transient_Local_GETS, O) {
1938 d_sendDataWithToken;
1942 transition(M, Transient_GETS, O) {
1943 d_sendDataWithNTokenIfAvail;
1947 transition(M_W, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
  transition(M, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    l_popPersistentQueue;
  }

  transition(M, Persistent_GETS, S_L) {
    ff_sendDataWithAllButNorOneTokens;
    l_popPersistentQueue;
  }

  // Ignore persistent requests in the lockout period
  transition(M_W, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
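  // The use timeout expired while a persistent request is outstanding: stop
  // waiting and hand the block over to the starving requester.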
  transition(M_W, Use_TimeoutStarverS, S_L) {
    s_deallocateTBE;
    ff_sendDataWithAllButNorOneTokens;
    jj_unsetUseTimer;
  }

  // Someone unlocked during the timeout
  transition(M_W, {Use_TimeoutNoStarvers, Use_TimeoutNoStarvers_NoMig}, M) {
    s_deallocateTBE;
    jj_unsetUseTimer;
    kd_wakeUpDependents;
  }

  transition(M_W, Use_TimeoutStarverX, I_L) {
    s_deallocateTBE;
    ee_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    jj_unsetUseTimer;
  }

  transition(MM_W, {Use_TimeoutStarverX, Use_TimeoutStarverS}, I_L) {
    s_deallocateTBE;
    ee_sendDataWithAllTokens;
    p_informL2AboutTokenLoss;
    jj_unsetUseTimer;
  }
  // Transient_GETX and Transient_GETS in transient states
  transition(OM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
    m_popRequestQueue;  // Even if we have the data, we can pretend we don't have it yet.
  }

  transition(IS, {Transient_GETX, Transient_Local_GETX}) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition(IS, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
    m_popRequestQueue;
  }

  transition(IS, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IS_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition(IM, {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token}, IM_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }

  transition({SM, SM_L}, Persistent_GETX, IM_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(SM, {Persistent_GETS, Persistent_GETS_Last_Token}, SM_L) {
    f_sendAckWithAllButNorOneTokens;
    l_popPersistentQueue;
  }

  transition(SM_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETX, IM_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETS, SM_L) {
    ff_sendDataWithAllButNorOneTokens;
    l_popPersistentQueue;
  }

  transition(OM, Persistent_GETS_Last_Token, IM_L) {
    fo_sendDataWithOwnerToken;
    l_popPersistentQueue;
  }
  // Transitions from IM/SM

  transition({IM, SM}, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IM, Data_Shared, SM) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IM, Data_Owner, OM) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IM, Data_All_Tokens, MM_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(SM, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(SM, Data_Owner, OM) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(SM, Data_All_Tokens, MM_W) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // We don't have the data yet, but we might have collected some tokens.
  // We give them up here to avoid livelock.
  transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) {
    t_sendAckWithCollectedTokens;
    m_popRequestQueue;
  }

  transition({IM, SM}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
    m_popRequestQueue;
  }
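  // The reissue timer fired before the write collected all of its tokens;
  // reissue the write request.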
  transition({IM, SM}, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }
  // Transitions from OM

  transition(OM, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(OM, Ack_All_Tokens, MM_W) {
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(OM, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(OM, Data_All_Tokens, MM_W) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(OM, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }
  // Transitions from IS

  transition(IS, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(IS, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_All_Tokens, M_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    o_scheduleUseTimeout;
    j_unsetReissueTimer;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueReadRequest;
  }
  // Transitions from I_L

  transition(I_L, Load, IS_L) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueReadRequest;
    k_popMandatoryQueue;
  }

  transition(I_L, Ifetch, IS_L) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueReadRequest;
    k_popMandatoryQueue;
  }

  transition(I_L, {Store, Atomic}, IM_L) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueWriteRequest;
    k_popMandatoryQueue;
  }

  // Transitions from S_L

  transition(S_L, {Store, Atomic}, SM_L) {
    i_allocateTBE;
    b_issueWriteRequest;
    k_popMandatoryQueue;
  }
  // Other transitions from *_L states
  transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS, Transient_GETX, Transient_Local_GETX}) {
    m_popRequestQueue;
  }
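  // While another node's persistent request has this block locked, any tokens
  // or data we receive are forwarded (bounced) on to the starving requester.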
  transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, Data_All_Tokens) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }
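  // The reissue timer can still fire while we are locked out; reissue the
  // outstanding request.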
  transition(IS_L, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueReadRequest;
  }

  transition({IM_L, SM_L}, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }
  // Opportunistically complete the memory operation in the following
  // cases. Note: these transitions could just use
  // g_bounceResponseToStarver, but if we have the data and tokens, we
  // might as well complete the memory request while we have the
  // chance (and then immediately forward the data on).

  transition(IM_L, Data_All_Tokens, MM_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    j_unsetReissueTimer;
    o_scheduleUseTimeout;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(SM_L, Data_All_Tokens, S_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    ff_sendDataWithAllButNorOneTokens;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS_L, Data_Shared, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    s_deallocateTBE;
    e_sendAckWithCollectedTokens;
    p_informL2AboutTokenLoss;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS_L, Data_Owner, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    ee_sendDataWithAllTokens;
    s_deallocateTBE;
    p_informL2AboutTokenLoss;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  transition(IS_L, Data_All_Tokens, M_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    j_unsetReissueTimer;
    o_scheduleUseTimeout;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }
  // Own_Lock_or_Unlock: the persistent lock we observe is our own (or its
  // release), so the block is no longer locked out by another processor;
  // return to the corresponding unlocked state.
  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(IM_L, Own_Lock_or_Unlock, IM) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(IS_L, Own_Lock_or_Unlock, IS) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }

  transition(SM_L, Own_Lock_or_Unlock, SM) {
    l_popPersistentQueue;
    kd_wakeUpDependents;
  }