// File: src/mem/protocol/MOESI_CMP_token-L1cache.sm
// (gitweb export header removed; originating commit: "Merge Ruby Stuff")
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id: MOESI_CMP_token-L1cache.sm 1.22 05/01/19 15:55:39-06:00 beckmann@s0-28.cs.wisc.edu $
32 *
33 */
34
// L1 cache controller for the MOESI CMP token-coherence protocol.
// Virtual-network assignment for the buffers declared below:
//   vnet 0 = transient requests, vnet 2 = responses/data,
//   vnet 3 = persistent (starvation-avoidance) requests.
// Only the persistent network is ordered; vnet 1 is not used by any
// buffer declared here.
35 machine(L1Cache, "Token protocol") {
36
37 // From this node's L1 cache TO the network
38 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
39 MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
40 // a local L1 -> this L2 bank
41 MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false";
42 MessageBuffer persistentFromL1Cache, network="To", virtual_network="3", ordered="true";
43
44 // To this node's L1 cache FROM the network
45 // a L2 bank -> this L1
46 MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false";
47 // a L2 bank -> this L1
48 MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false";
49 MessageBuffer persistentToL1Cache, network="From", virtual_network="3", ordered="true";
50
51 // STATES
// Naming conventions used below:
//   ^W ("waiting") — dirty states still waiting before downgrading,
//   ^L ("locked")  — another processor holds a persistent request on
//                    the line, so this controller has lost priority.
52 enumeration(State, desc="Cache states", default="L1Cache_State_I") {
53 // Base states
54 NP, "NP", desc="Not Present";
55 I, "I", desc="Idle";
56 S, "S", desc="Shared";
57 O, "O", desc="Owned";
58 M, "M", desc="Modified (dirty)";
59 MM, "MM", desc="Modified (dirty and locally modified)";
60 M_W, "M^W", desc="Modified (dirty), waiting";
61 MM_W, "MM^W", desc="Modified (dirty and locally modified), waiting";
62
63 // Transient States
64 IM, "IM", desc="Issued GetX";
65 SM, "SM", desc="Issued GetX, we still have an old copy of the line";
66 OM, "OM", desc="Issued GetX, received data";
67 IS, "IS", desc="Issued GetS";
68
69 // Locked states
70 I_L, "I^L", desc="Invalid, Locked";
71 S_L, "S^L", desc="Shared, Locked";
72 IM_L, "IM^L", desc="Invalid, Locked, trying to go to Modified";
73 SM_L, "SM^L", desc="Shared, Locked, trying to go to Modified";
74 IS_L, "IS^L", desc="Invalid, Locked, trying to go to Shared";
75 }
76
77 // EVENTS
// Events are raised by the in_ports below; responses are split by
// whether they deliver the final token(s) (…_All_Tokens), and requests
// by whether they came from a local (same-chip) L1 (Transient_Local_*).
78 enumeration(Event, desc="Cache events") {
79 Load, desc="Load request from the processor";
80 Ifetch, desc="I-fetch request from the processor";
81 Store, desc="Store request from the processor";
82 L1_Replacement, desc="L1 Replacement";
83
84 // Responses
85 Data_Shared, desc="Received a data message, we are now a sharer";
86 Data_Owner, desc="Received a data message, we are now the owner";
87 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
88 Ack, desc="Received an ack message";
89 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
90
91 // Requests
92 Transient_GETX, desc="A GetX from another processor";
93 Transient_Local_GETX, desc="A GetX from another processor";
94 Transient_GETS, desc="A GetS from another processor";
95 Transient_Local_GETS, desc="A GetS from another processor";
96 Transient_GETS_Last_Token, desc="A GetS from another processor";
97 Transient_Local_GETS_Last_Token, desc="A GetS from another processor";
98
99 // Lock/Unlock for distributed
100 Persistent_GETX, desc="Another processor has priority to read/write";
101 Persistent_GETS, desc="Another processor has priority to read";
102 Own_Lock_or_Unlock, desc="This processor now has priority";
103
104 // Triggers
105 Request_Timeout, desc="Timeout";
106 Use_TimeoutStarverX, desc="Timeout";
107 Use_TimeoutStarverS, desc="Timeout";
108 Use_TimeoutNoStarvers, desc="Timeout";
109
110 }
111
112 // TYPES
113
// Configuration accessors supplied by the generated/runtime layer:
// retry threshold before escalating to a persistent request, and the
// fixed vs. dynamically-estimated reissue timeout selection.
114 int getRetryThreshold();
115 int getFixedTimeoutLatency();
116 bool getDynamicTimeoutEnabled();
117
118 // CacheEntry
// Per-line L1 state: coherence state, dirty bit, token count, data.
119 structure(Entry, desc="...", interface="AbstractCacheEntry") {
120 State CacheState, desc="cache state";
121 bool Dirty, desc="Is the data dirty (different than memory)?";
122 int Tokens, desc="The number of tokens we're holding for the line";
123 DataBlock DataBlk, desc="data for the block";
124 }
125
126
127 // TBE fields
// Transaction Buffer Entry: per-outstanding-miss bookkeeping, including
// how many times the request has been (re)issued and whether it was
// escalated to a persistent request.
128 structure(TBE, desc="...") {
129 Address Address, desc="Physical address for this TBE";
130 State TBEState, desc="Transient state";
131 int IssueCount, default="0", desc="The number of times we've issued a request for this line.";
132 Address PC, desc="Program counter of request";
133
134 bool WentPersistent, default="false", desc="Request went persistent";
135 bool ExternalResponse, default="false", desc="Response came from an external controller";
136
137 AccessType AccessType, desc="Type of request (used for profiling)";
138 Time IssueTime, desc="Time the request was issued";
139 AccessModeType AccessMode, desc="user/supervisor access type";
140 PrefetchBit Prefetch, desc="Is this a prefetch request";
141 }
142
// Interfaces to C++-implemented structures (cache arrays and the TBE
// table); only the methods used by this controller are declared.
143 external_type(CacheMemory) {
144 bool cacheAvail(Address);
145 Address cacheProbe(Address);
146 void allocate(Address);
147 void deallocate(Address);
148 Entry lookup(Address);
149 void changePermission(Address, AccessPermission);
150 bool isTagPresent(Address);
151 }
152
153 external_type(TBETable) {
154 TBE lookup(Address);
155 void allocate(Address);
156 void deallocate(Address);
157 bool isPresent(Address);
158 }
159
160
// Controller state: split I/D L1 arrays, the miss-tracking TBE table,
// the persistent-request table, and two timer tables (use-timeout for
// held-exclusive lines, reissue-timeout for outstanding requests).
161 TBETable L1_TBEs, template_hack="<L1Cache_TBE>";
162 CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
163 CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
164
165 MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
166 Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
167
// True while this controller has a persistent request of its own in
// flight; prevents issuing a second one (see a_/b_ issue actions).
168 bool starving, default="false";
169
170 PersistentTable persistentTable, constructor_hack="i";
171 TimerTable useTimerTable;
172 TimerTable reissueTimerTable;
173
174 int outstandingRequests, default="0";
175 int outstandingPersistentRequests, default="0";
176
177 int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculating the estimated average
178 int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_vec[i]))";
179
// Return the current smoothed miss-latency estimate
// (averageLatencyCounter >> averageLatencyHysteresis).
// NOTE: also records the estimate with the profiler as a side effect,
// so every call shows up in the profile output.
180 int averageLatencyEstimate() {
181 DEBUG_EXPR( (averageLatencyCounter >> averageLatencyHysteresis) );
182 profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
183 return averageLatencyCounter >> averageLatencyHysteresis;
184 }
185
// Fold a new observed miss latency into the exponential moving-average
// accumulator used by the dynamic reissue-timeout logic.
186 void updateAverageLatencyEstimate(int latency) {
187 DEBUG_EXPR( latency );
188 assert(latency >= 0);
189
190 // By subtracting the current average and then adding the most
191 // recent sample, we calculate an estimate of the recent average.
192 // If we simply used a running sum and divided by the total number
193 // of entries, the estimate of the average would adapt very slowly
194 // after the execution has run for a long time.
195 // averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
196
197 averageLatencyCounter := averageLatencyCounter - averageLatencyEstimate() + latency;
198 }
199
200
// Return (by reference) the L1 entry for addr.  The D-cache is
// consulted first; otherwise the I-cache entry is returned, so a
// caller must know the tag is present in at least one of the two.
Entry getCacheEntry(Address addr), return_by_ref="yes" {
  if (L1DcacheMemory.isTagPresent(addr) == false) {
    return L1IcacheMemory[addr];
  } else {
    return L1DcacheMemory[addr];
  }
}
208
// Number of tokens this L1 holds for addr; 0 when the block is not
// cached in either array.
int getTokens(Address addr) {
  if (L1DcacheMemory.isTagPresent(addr)) {
    return L1DcacheMemory[addr].Tokens;
  } else {
    if (L1IcacheMemory.isTagPresent(addr)) {
      return L1IcacheMemory[addr].Tokens;
    } else {
      return 0;
    }
  }
}
218
// Apply a new access permission to whichever L1 array currently holds
// addr (D-cache takes precedence, mirroring getCacheEntry()).
void changePermission(Address addr, AccessPermission permission) {
  if (L1DcacheMemory.isTagPresent(addr) == false) {
    return L1IcacheMemory.changePermission(addr, permission);
  } else {
    return L1DcacheMemory.changePermission(addr, permission);
  }
}
226
// True when addr has a tag in either the L1 instruction or data array.
bool isCacheTagPresent(Address addr) {
  return (L1IcacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr));
}
230
// Compute the current state of addr: TBE state takes priority (a miss
// is in flight), then the cached state; an uncached block locked by a
// higher-priority persistent requester reports I_L, otherwise NP.
231 State getState(Address addr) {
// Exclusion invariant: a block lives in at most one of the two L1s.
232 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
233
234 if (L1_TBEs.isPresent(addr)) {
235 return L1_TBEs[addr].TBEState;
236 } else if (isCacheTagPresent(addr)) {
237 return getCacheEntry(addr).CacheState;
238 } else {
239 if ((persistentTable.isLocked(addr) == true) && (persistentTable.findSmallest(addr) != machineID)) {
240 // Not in cache, in persistent table, but this processor isn't highest priority
241 return State:I_L;
242 } else {
243 return State:NP;
244 }
245 }
246 }
247
// Install the new (possibly transient) coherence state for addr,
// sanity-check the token-count invariants implied by that state, and
// update the cache access permission to match.
// (Rewritten block: the gitweb line-number prefixes were dropped.)
void setState(Address addr, State state) {
  // Exclusion invariant: a block lives in at most one of the two L1s.
  assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

  if (L1_TBEs.isPresent(addr)) {
    // A TBE exists only while a miss is outstanding, so the state
    // recorded in it must be transient, never a stable one.
    assert(state != State:I);
    assert(state != State:S);
    assert(state != State:O);
    assert(state != State:MM);
    assert(state != State:M);
    L1_TBEs[addr].TBEState := state;
  }

  if (isCacheTagPresent(addr)) {
    // Make sure the token count is in range
    assert(getCacheEntry(addr).Tokens >= 0);
    assert(getCacheEntry(addr).Tokens <= max_tokens());

    if ((state == State:I_L) ||
        (state == State:IM_L) ||
        (state == State:IS_L)) {
      // Make sure we have no tokens in the "Invalid, locked" states.
      // (A redundant isCacheTagPresent() re-check used to guard this
      // assert; it was removed — we are already inside that guard.)
      assert(getCacheEntry(addr).Tokens == 0);

      // Make sure the line is locked
      // assert(persistentTable.isLocked(addr));

      // But we shouldn't have highest priority for it
      // assert(persistentTable.findSmallest(addr) != id);

    } else if ((state == State:S_L) ||
               (state == State:SM_L)) {
      assert(getCacheEntry(addr).Tokens >= 1);

      // Make sure the line is locked...
      // assert(persistentTable.isLocked(addr));

      // ...But we shouldn't have highest priority for it...
      // assert(persistentTable.findSmallest(addr) != id);

      // ...And it must be a GETS request
      // assert(persistentTable.typeOfSmallest(addr) == AccessType:Read);

    } else {

      // If there is an entry in the persistent table of this block,
      // this processor needs to have an entry in the table for this
      // block, and that entry better be the smallest (highest
      // priority). Otherwise, the state should have been one of
      // locked states

      //if (persistentTable.isLocked(addr)) {
      //  assert(persistentTable.findSmallest(addr) == id);
      //}
    }

    // In the M-like states (M, MM and their waiting variants) this
    // cache holds every token for the line.
    if (state == State:MM || state == State:M || state == State:MM_W || state == State:M_W) {
      assert(getCacheEntry(addr).Tokens == max_tokens());
    }

    // in NP you have no tokens
    if (state == State:NP) {
      assert(getCacheEntry(addr).Tokens == 0);
    }

    // You have at least one token in S-like states
    if (state == State:S || state == State:SM) {
      assert(getCacheEntry(addr).Tokens > 0);
    }

    // You have at least one token in O-like states.
    // BUG FIX: this condition used '&&', which can never hold (state
    // cannot equal both O and OM), so the check was dead code.
    if (state == State:O || state == State:OM) {
      assert(getCacheEntry(addr).Tokens >= 1); // Must have at least one token
      // The owner usually, but not always, holds at least half the
      // tokens ("only mostly true" per the original note), so the
      // stronger check stays disabled:
      // assert(getCacheEntry(addr).Tokens >= (max_tokens() / 2));
    }

    getCacheEntry(addr).CacheState := state;

    // Set permission.  Note M/M_W are deliberately Read_Only here;
    // only MM/MM_W (locally modified) grant write permission.
    if (state == State:MM ||
        state == State:MM_W) {
      changePermission(addr, AccessPermission:Read_Write);
    } else if ((state == State:S) ||
               (state == State:O) ||
               (state == State:M) ||
               (state == State:M_W) ||
               (state == State:SM) ||
               (state == State:S_L) ||
               (state == State:SM_L) ||
               (state == State:OM)) {
      changePermission(addr, AccessPermission:Read_Only);
    } else {
      changePermission(addr, AccessPermission:Invalid);
    }
  }
}
346
// Map a processor request type onto the triggering cache event.
// LD -> Load, IFETCH -> Ifetch, ST/ATOMIC -> Store; anything else is
// a protocol error.
Event mandatory_request_type_to_event(CacheRequestType type) {
  if (type == CacheRequestType:IFETCH) {
    return Event:Ifetch;
  } else if (type == CacheRequestType:LD) {
    return Event:Load;
  } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
    return Event:Store;
  } else {
    error("Invalid CacheRequestType");
  }
}
358
// Classify a processor request as a Read (LD/IFETCH) or a Write
// (ST/ATOMIC) for persistent-table and profiling purposes.
AccessType cache_request_type_to_access_type(CacheRequestType type) {
  if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
    return AccessType:Write;
  } else if ((type == CacheRequestType:LD) || (type == CacheRequestType:IFETCH)) {
    return AccessType:Read;
  } else {
    error("Invalid CacheRequestType");
  }
}
368
// Classify where a (non-direct-hit) response came from for profiling:
// our own L2 bank counts as a plain L2 hit, any other L1/L2 counts as
// a cache-to-cache (_wCC) transfer.
369 GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
370 if (machineIDToMachineType(sender) == MachineType:L1Cache) {
371 return GenericMachineType:L1Cache_wCC; // NOTE direct L1 hits should not call this
372 } else if (machineIDToMachineType(sender) == MachineType:L2Cache) {
// Is the sender the L2 bank this L1 maps addr to (i.e. "our" bank)?
373 if ( sender == (map_L1CacheMachId_to_L2Cache(addr,machineID))) {
374 return GenericMachineType:L2Cache;
375 } else {
376 return GenericMachineType:L2Cache_wCC;
377 }
378 } else {
379 return ConvertMachToGenericMach(machineIDToMachineType(sender));
380 }
381 }
382
// Thin wrappers over the persistent (starvation-avoidance) table:
// okToIssueStarving() asks whether this controller may escalate addr
// to a persistent request; markPersistentEntries() records ours.
383 bool okToIssueStarving(Address addr) {
384 return persistentTable.okToIssueStarving(addr);
385 }
386
387 void markPersistentEntries(Address addr) {
388 persistentTable.markEntries(addr);
389 }
390
391 MessageBuffer triggerQueue, ordered="false", random="false";
392
393 // ** OUT_PORTS **
// One out_port per "From this L1" buffer, plus requestRecycle_out,
// which re-enqueues onto the *incoming* request buffer.
394 out_port(persistentNetwork_out, PersistentMsg, persistentFromL1Cache);
395 out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
396 out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
397 out_port(requestRecycle_out, RequestMsg, requestToL1Cache);
398
399 // ** IN_PORTS **
400
401 // Use Timer
// Fires when the "use timeout" for a held line expires.  If another
// processor now has a higher-priority persistent request on the line,
// the event distinguishes write (StarverX) from read (StarverS)
// starvers; otherwise no one is starving.
402 in_port(useTimerTable_in, Address, useTimerTable) {
403 if (useTimerTable_in.isReady()) {
404 if (persistentTable.isLocked(useTimerTable.readyAddress()) && (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
405 if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
406 trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress());
407 }
408 else {
409 trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress());
410 }
411 }
412 else {
413 trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress());
414 }
415 }
416 }
417
418 // Reissue Timer
// Fires when an outstanding transient request has waited too long
// without completing; the a_/b_ issue actions will retry (and
// eventually escalate to a persistent request).
419 in_port(reissueTimerTable_in, Address, reissueTimerTable) {
420 if (reissueTimerTable_in.isReady()) {
421 trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
422 }
423 }
424
425
426
427 // Persistent Network
// Persistent (starvation) requests: first record the lock/unlock in
// the local persistent table, then trigger an event based on who now
// holds the highest-priority (smallest-ID) entry for the line.
428 in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache) {
429 if (persistentNetwork_in.isReady()) {
430 peek(persistentNetwork_in, PersistentMsg) {
431 assert(in_msg.Destination.isElement(machineID));
432
433 // Apply the lockdown or unlockdown message to the table
434 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
435 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
436 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
437 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
438 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
439 persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
440 } else {
441 error("Unexpected message");
442 }
443
444 // React to the message based on the current state of the table
445 if (persistentTable.isLocked(in_msg.Address)) {
446 if (persistentTable.findSmallest(in_msg.Address) == machineID) {
447 // Our Own Lock - this processor is highest priority
448 trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
449 } else {
450 if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
451 trigger(Event:Persistent_GETS, in_msg.Address);
452 } else {
453 trigger(Event:Persistent_GETX, in_msg.Address);
454 }
455 }
456 } else {
457 // Unlock case - no entries in the table
458 trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
459 }
460 }
461 }
462 }
463
464
465 // Request Network
// Transient GETX/GETS requests from other caches.  GETS is further
// split out when we hold exactly one token (the "last token" case
// needs special handling so the token is not given away carelessly);
// in_msg.isLocal marks requests from L1s on the same chip.
466 in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
467 if (requestNetwork_in.isReady()) {
468 peek(requestNetwork_in, RequestMsg) {
469 assert(in_msg.Destination.isElement(machineID));
470 if (in_msg.Type == CoherenceRequestType:GETX) {
471 if (in_msg.isLocal) {
472 trigger(Event:Transient_Local_GETX, in_msg.Address);
473 }
474 else {
475 trigger(Event:Transient_GETX, in_msg.Address);
476 }
477 } else if (in_msg.Type == CoherenceRequestType:GETS) {
478 if ( (L1DcacheMemory.isTagPresent(in_msg.Address) || L1IcacheMemory.isTagPresent(in_msg.Address)) && getCacheEntry(in_msg.Address).Tokens == 1) {
479 if (in_msg.isLocal) {
480 trigger(Event:Transient_Local_GETS_Last_Token, in_msg.Address);
481 }
482 else {
483 trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
484 }
485 }
486 else {
487 if (in_msg.isLocal) {
488 trigger(Event:Transient_Local_GETS, in_msg.Address);
489 }
490 else {
491 trigger(Event:Transient_GETS, in_msg.Address);
492 }
493 }
494 } else {
495 error("Unexpected message");
496 }
497 }
498 }
499 }
500
501 // Response Network
// Incoming ACK/DATA responses.  First classify the sender (off-chip
// responses mark the TBE so the dynamic timeout can be updated); then
// trigger an event, using the *_All_Tokens variants when this message
// delivers the final tokens (held + incoming == max_tokens()).
502 in_port(responseNetwork_in, ResponseMsg, responseToL1Cache) {
503 if (responseNetwork_in.isReady()) {
504 peek(responseNetwork_in, ResponseMsg) {
505 assert(in_msg.Destination.isElement(machineID));
506
507 // Mark TBE flag if response received off-chip. Use this to update average latency estimate
508 if ( in_msg.SenderMachine == MachineType:L2Cache ) {
509
510 if (in_msg.Sender == map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID)) {
511 // came from an off-chip L2 cache
512 if (L1_TBEs.isPresent(in_msg.Address)) {
513 // L1_TBEs[in_msg.Address].ExternalResponse := true;
514 // profile_offchipL2_response(in_msg.Address);
515 }
516 }
517 else {
518 // profile_onchipL2_response(in_msg.Address );
519 }
520 } else if ( in_msg.SenderMachine == MachineType:Directory ) {
// Only memory/directory responses currently count as "external"
// for the latency estimate; the L1/L2 cases are commented out.
521 if (L1_TBEs.isPresent(in_msg.Address)) {
522 L1_TBEs[in_msg.Address].ExternalResponse := true;
523 // profile_memory_response( in_msg.Address);
524 }
525 } else if ( in_msg.SenderMachine == MachineType:L1Cache) {
526 if (isLocalProcessor(machineID, in_msg.Sender) == false) {
527 if (L1_TBEs.isPresent(in_msg.Address)) {
528 // L1_TBEs[in_msg.Address].ExternalResponse := true;
529 // profile_offchipL1_response(in_msg.Address );
530 }
531 }
532 else {
533 // profile_onchipL1_response(in_msg.Address );
534 }
535 } else {
536 error("unexpected SenderMachine");
537 }
538
539
540 if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
541 if (in_msg.Type == CoherenceResponseType:ACK) {
542 trigger(Event:Ack, in_msg.Address);
543 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
544 trigger(Event:Data_Owner, in_msg.Address);
545 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
546 trigger(Event:Data_Shared, in_msg.Address);
547 } else {
548 error("Unexpected message");
549 }
550 } else {
551 if (in_msg.Type == CoherenceResponseType:ACK) {
552 trigger(Event:Ack_All_Tokens, in_msg.Address);
553 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
554 trigger(Event:Data_All_Tokens, in_msg.Address);
555 } else {
556 error("Unexpected message");
557 }
558 }
559 }
560 }
561 }
562
563 // Mandatory Queue
// Processor-side requests.  The L1I/L1D arrays are exclusive: a block
// found in the "wrong" array is first evicted (L1_Replacement), and a
// miss with no free way evicts the probe victim before the request
// can be retried.
564 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
565 if (mandatoryQueue_in.isReady()) {
566 peek(mandatoryQueue_in, CacheMsg) {
567 // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
568
569 if (in_msg.Type == CacheRequestType:IFETCH) {
570 // ** INSTRUCTION ACCESS ***
571
572 // Check to see if it is in the OTHER L1
573 if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
574 // The block is in the wrong L1, try to write it to the L2
575 trigger(Event:L1_Replacement, in_msg.Address);
576 }
577
578 if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
579 // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
580 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
581 } else {
582 if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
583 // L1 doesn't have the line, but we have space for it in the L1
584 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
585 } else {
586 // No room in the L1, so we need to make room
587 trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
588 }
589 }
590 } else {
591 // *** DATA ACCESS ***
592
593 // Check to see if it is in the OTHER L1
594 if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
595 // The block is in the wrong L1, try to write it to the L2
596 trigger(Event:L1_Replacement, in_msg.Address);
597 }
598
599 if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
600 // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
601 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
602 } else {
603 if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
604 // L1 doesn't have the line, but we have space for it in the L1
605 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
606 } else {
607 // No room in the L1, so we need to make room
608 trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
609 }
610 }
611 }
612 }
613 }
614 }
615
616 // ACTIONS
617
// Issue (or reissue) a GETS for the block in the TBE.  Below the retry
// threshold a transient request goes to our L2 bank and, separately,
// to the other on-chip L1s (isLocal set), and a reissue timer is armed
// (dynamic 1.25x-average or fixed timeout).  At or above the threshold
// the request escalates to a broadcast persistent GETS.
618 action(a_issueReadRequest, "a", desc="Issue GETS") {
619 if (L1_TBEs[address].IssueCount == 0) {
620 // Update outstanding requests
621 profile_outstanding_request(outstandingRequests);
622 outstandingRequests := outstandingRequests + 1;
623 }
624
625 if (L1_TBEs[address].IssueCount >= getRetryThreshold() ) {
626 // Issue a persistent request if possible
627 if (okToIssueStarving(address) && (starving == false)) {
628 enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
629 out_msg.Address := address;
630 out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
631 out_msg.Requestor := machineID;
632 out_msg.Destination.broadcast(MachineType:L1Cache);
633 out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
634 out_msg.Destination.add(map_Address_to_Directory(address));
635 out_msg.MessageSize := MessageSizeType:Persistent_Control;
636 out_msg.Prefetch := L1_TBEs[address].Prefetch;
637 out_msg.AccessMode := L1_TBEs[address].AccessMode;
638 }
639 markPersistentEntries(address);
640 starving := true;
641
642 if (L1_TBEs[address].IssueCount == 0) {
643 profile_persistent_prediction(address, L1_TBEs[address].AccessType);
644 }
645
646 // Update outstanding requests
647 profile_outstanding_persistent_request(outstandingPersistentRequests);
648 outstandingPersistentRequests := outstandingPersistentRequests + 1;
649
650 // Increment IssueCount
651 L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
652
653 L1_TBEs[address].WentPersistent := true;
654
655 // Do not schedule a wakeup, a persistent request will always complete
656 }
657 else {
658
659 // We'd like to issue a persistent request, but are not allowed
660 // to issue a P.R. right now. Thus, we do not increment the
661 // IssueCount.
662
663 // Set a wakeup timer
664 reissueTimerTable.set(address, 10);
665
666 }
667 } else {
668 // Make a normal request
669 enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
670 out_msg.Address := address;
671 out_msg.Type := CoherenceRequestType:GETS;
672 out_msg.Requestor := machineID;
673 out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
674 out_msg.RetryNum := L1_TBEs[address].IssueCount;
675 if (L1_TBEs[address].IssueCount == 0) {
676 out_msg.MessageSize := MessageSizeType:Request_Control;
677 } else {
678 out_msg.MessageSize := MessageSizeType:Reissue_Control;
679 }
680 out_msg.Prefetch := L1_TBEs[address].Prefetch;
681 out_msg.AccessMode := L1_TBEs[address].AccessMode;
682 }
683
684 // send to other local L1s, with local bit set
685 enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
686 out_msg.Address := address;
687 out_msg.Type := CoherenceRequestType:GETS;
688 out_msg.Requestor := machineID;
689 out_msg.Destination := getOtherLocalL1IDs(machineID);
690 out_msg.RetryNum := L1_TBEs[address].IssueCount;
691 out_msg.isLocal := true;
692 if (L1_TBEs[address].IssueCount == 0) {
693 out_msg.MessageSize := MessageSizeType:Request_Control;
694 } else {
695 out_msg.MessageSize := MessageSizeType:Reissue_Control;
696 }
697 out_msg.Prefetch := L1_TBEs[address].Prefetch;
698 out_msg.AccessMode := L1_TBEs[address].AccessMode;
699 }
700
701 // Increment IssueCount
702 L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
703
704 // Set a wakeup timer
705
706 if (getDynamicTimeoutEnabled()) {
707 reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
708 } else {
709 reissueTimerTable.set(address, getFixedTimeoutLatency());
710 }
711
712 }
713 }
714
// Issue (or reissue) a GETX, mirroring a_issueReadRequest: transient
// GETX to our L2 bank plus the other local L1s below the retry
// threshold, persistent broadcast GETX at/above it.
// NOTE(review): unlike the GETS path, these messages also set
// RequestorMachine — presumably the GETS path simply predates that
// field; confirm against the message definition.
715 action(b_issueWriteRequest, "b", desc="Issue GETX") {
716
717 if (L1_TBEs[address].IssueCount == 0) {
718 // Update outstanding requests
719 profile_outstanding_request(outstandingRequests);
720 outstandingRequests := outstandingRequests + 1;
721 }
722
723 if (L1_TBEs[address].IssueCount >= getRetryThreshold() ) {
724 // Issue a persistent request if possible
725 if ( okToIssueStarving(address) && (starving == false)) {
726 enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
727 out_msg.Address := address;
728 out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
729 out_msg.Requestor := machineID;
730 out_msg.RequestorMachine := MachineType:L1Cache;
731 out_msg.Destination.broadcast(MachineType:L1Cache);
732 out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
733 out_msg.Destination.add(map_Address_to_Directory(address));
734 out_msg.MessageSize := MessageSizeType:Persistent_Control;
735 out_msg.Prefetch := L1_TBEs[address].Prefetch;
736 out_msg.AccessMode := L1_TBEs[address].AccessMode;
737 }
738 markPersistentEntries(address);
739 starving := true;
740
741 // Update outstanding requests
742 profile_outstanding_persistent_request(outstandingPersistentRequests);
743 outstandingPersistentRequests := outstandingPersistentRequests + 1;
744
745 if (L1_TBEs[address].IssueCount == 0) {
746 profile_persistent_prediction(address, L1_TBEs[address].AccessType);
747 }
748
749 // Increment IssueCount
750 L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
751
752 L1_TBEs[address].WentPersistent := true;
753
754 // Do not schedule a wakeup, a persistent request will always complete
755 }
756 else {
757
758 // We'd like to issue a persistent request, but are not allowed
759 // to issue a P.R. right now. Thus, we do not increment the
760 // IssueCount.
761
762 // Set a wakeup timer
763 reissueTimerTable.set(address, 10);
764 }
765
766
767 } else {
768 // Make a normal request
769 enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
770 out_msg.Address := address;
771 out_msg.Type := CoherenceRequestType:GETX;
772 out_msg.Requestor := machineID;
773 out_msg.RequestorMachine := MachineType:L1Cache;
774 out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
775 out_msg.RetryNum := L1_TBEs[address].IssueCount;
776
777 if (L1_TBEs[address].IssueCount == 0) {
778 out_msg.MessageSize := MessageSizeType:Request_Control;
779 } else {
780 out_msg.MessageSize := MessageSizeType:Reissue_Control;
781 }
782 out_msg.Prefetch := L1_TBEs[address].Prefetch;
783 out_msg.AccessMode := L1_TBEs[address].AccessMode;
784 }
785
786 // send to other local L1s too
787 enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
788 out_msg.Address := address;
789 out_msg.Type := CoherenceRequestType:GETX;
790 out_msg.Requestor := machineID;
791 out_msg.isLocal := true;
792 out_msg.Destination := getOtherLocalL1IDs(machineID);
793 out_msg.RetryNum := L1_TBEs[address].IssueCount;
794 if (L1_TBEs[address].IssueCount == 0) {
795 out_msg.MessageSize := MessageSizeType:Request_Control;
796 } else {
797 out_msg.MessageSize := MessageSizeType:Reissue_Control;
798 }
799 out_msg.Prefetch := L1_TBEs[address].Prefetch;
800 out_msg.AccessMode := L1_TBEs[address].AccessMode;
801 }
802
803 // Increment IssueCount
804 L1_TBEs[address].IssueCount := L1_TBEs[address].IssueCount + 1;
805
806 DEBUG_EXPR("incremented issue count");
807 DEBUG_EXPR(L1_TBEs[address].IssueCount);
808
809 // Set a wakeup timer
810 if (getDynamicTimeoutEnabled()) {
811 reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
812 } else {
813 reissueTimerTable.set(address, getFixedTimeoutLatency());
814 }
815 }
816 }
817
// Forward a response we cannot accept (tokens + data) onward to the
// directory/memory, preserving its type, token count, data and dirty
// bit so nothing is lost.
818 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
819 peek(responseNetwork_in, ResponseMsg) {
820 // FIXME, should use a 3rd vnet
821 enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
822 out_msg.Address := address;
823 out_msg.Type := in_msg.Type;
824 out_msg.Sender := machineID;
825 out_msg.SenderMachine := MachineType:L1Cache;
826 out_msg.Destination.add(map_Address_to_Directory(address));
827 out_msg.Tokens := in_msg.Tokens;
828 out_msg.MessageSize := in_msg.MessageSize;
829 out_msg.DataBlk := in_msg.DataBlk;
830 out_msg.Dirty := in_msg.Dirty;
831 }
832 }
833 }
834
// Replacement of a line we own: write back all held tokens plus the
// (possibly dirty) data to our L2 bank, then drop our token count to
// zero.  Owned writebacks always carry data.
835 action(c_ownedReplacement, "c", desc="Issue writeback") {
836 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
837 out_msg.Address := address;
838 out_msg.Sender := machineID;
839 out_msg.SenderMachine := MachineType:L1Cache;
840 out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
841 out_msg.Tokens := getCacheEntry(address).Tokens;
842 out_msg.DataBlk := getCacheEntry(address).DataBlk;
843 out_msg.Dirty := getCacheEntry(address).Dirty;
844 out_msg.Type := CoherenceResponseType:WB_OWNED;
845
846 // always send the data?
847 out_msg.MessageSize := MessageSizeType:Writeback_Data;
848 }
849 getCacheEntry(address).Tokens := 0;
850 }
851
// Replacement of a line held in a shared (non-owner) state: return any
// tokens we hold to our L2 bank.  The line is always clean here — only
// the owner can hold dirty data (see c_ownedReplacement) — so Dirty is
// hard-coded false.  Data rides along only when more than one token is
// returned; a lone token travels as a control-sized WB_TOKENS message.
// BUG FIX: desc used to read "Issue dirty writeback", contradicting
// the action's behavior; corrected to describe the clean writeback.
// (Rewritten block: the gitweb line-number prefixes were dropped.)
action(cc_sharedReplacement, "\c", desc="Issue shared (clean) writeback") {

  // don't send writeback if replacing block with no tokens
  if (getCacheEntry(address).Tokens != 0) {
    enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
      out_msg.Tokens := getCacheEntry(address).Tokens;
      out_msg.DataBlk := getCacheEntry(address).DataBlk;
      // assert(getCacheEntry(address).Dirty == false);
      out_msg.Dirty := false;

      // Send data only when it is useful to the L2 (more than one token).
      if (getCacheEntry(address).Tokens > 1) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:WB_SHARED_DATA;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:WB_TOKENS;
      }
    }
    getCacheEntry(address).Tokens := 0;
  }
}
878
879
// Respond to a transient GETS: hand the requestor the data plus exactly
// one token, always marked clean (DATA_SHARED).  The final assert shows
// the invariant: this cache keeps at least one token afterwards, so it
// is only used from states that hold two or more tokens.
880 action(d_sendDataWithToken, "d", desc="Send data and a token from cache to requestor") {
881 peek(requestNetwork_in, RequestMsg) {
882 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
883 out_msg.Address := address;
884 out_msg.Type := CoherenceResponseType:DATA_SHARED;
885 out_msg.Sender := machineID;
886 out_msg.SenderMachine := MachineType:L1Cache;
887 out_msg.Destination.add(in_msg.Requestor);
888 out_msg.Tokens := 1;
889 out_msg.DataBlk := getCacheEntry(address).DataBlk;
890 // out_msg.Dirty := getCacheEntry(address).Dirty;
891 out_msg.Dirty := false;
// Local (same-chip) responses use a cheaper message-size class.
892 if (in_msg.isLocal) {
893 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
894 } else {
895 out_msg.MessageSize := MessageSizeType:Response_Data;
896 }
897 }
898 }
899 getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
900 assert(getCacheEntry(address).Tokens >= 1);
901 }
902
// Respond to a transient GETS from state M: if more than N_tokens()
// tokens are held, give the requestor the data plus N tokens; else if
// more than one is held, give the data plus a single token; with only
// one token left, send nothing (the lone token is retained).
// NOTE: original desc said "Send data and a token" (copied from
// d_sendDataWithToken) although this action can send N tokens;
// corrected for accurate debug/docs output.
903 action(d_sendDataWithNTokenIfAvail, "\dd", desc="Send data and up to N tokens from cache to requestor") {
904 peek(requestNetwork_in, RequestMsg) {
905 if (getCacheEntry(address).Tokens > N_tokens()) {
906 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
907 out_msg.Address := address;
908 out_msg.Type := CoherenceResponseType:DATA_SHARED;
909 out_msg.Sender := machineID;
910 out_msg.SenderMachine := MachineType:L1Cache;
911 out_msg.Destination.add(in_msg.Requestor);
912 out_msg.Tokens := N_tokens();
913 out_msg.DataBlk := getCacheEntry(address).DataBlk;
914 // out_msg.Dirty := getCacheEntry(address).Dirty;
915 out_msg.Dirty := false;
916 if (in_msg.isLocal) {
917 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
918 } else {
919 out_msg.MessageSize := MessageSizeType:Response_Data;
920 }
921 }
922 getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - N_tokens();
923 }
924 else if (getCacheEntry(address).Tokens > 1) {
925 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
926 out_msg.Address := address;
927 out_msg.Type := CoherenceResponseType:DATA_SHARED;
928 out_msg.Sender := machineID;
929 out_msg.SenderMachine := MachineType:L1Cache;
930 out_msg.Destination.add(in_msg.Requestor);
931 out_msg.Tokens := 1;
932 out_msg.DataBlk := getCacheEntry(address).DataBlk;
933 // out_msg.Dirty := getCacheEntry(address).Dirty;
934 out_msg.Dirty := false;
935 if (in_msg.isLocal) {
936 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
937 } else {
938 out_msg.MessageSize := MessageSizeType:Response_Data;
939 }
940 }
941 getCacheEntry(address).Tokens := getCacheEntry(address).Tokens - 1;
942 }
943 }
944 // assert(getCacheEntry(address).Tokens >= 1);
945 }
946
// Respond to a transient GETX: surrender the data, the dirty bit, and
// every token this cache holds (DATA_OWNER) to the requestor, leaving
// this cache with zero tokens (caller transitions to I).
947 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
948 peek(requestNetwork_in, RequestMsg) {
949 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
950 out_msg.Address := address;
951 out_msg.Type := CoherenceResponseType:DATA_OWNER;
952 out_msg.Sender := machineID;
953 out_msg.SenderMachine := MachineType:L1Cache;
954 out_msg.Destination.add(in_msg.Requestor);
955 assert(getCacheEntry(address).Tokens >= 1);
956 out_msg.Tokens := getCacheEntry(address).Tokens;
957 out_msg.DataBlk := getCacheEntry(address).DataBlk;
958 out_msg.Dirty := getCacheEntry(address).Dirty;
959 if (in_msg.isLocal) {
960 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
961 } else {
962 out_msg.MessageSize := MessageSizeType:Response_Data;
963 }
964 }
965 }
966 getCacheEntry(address).Tokens := 0;
967 }
968
// Persistent-request variant of token surrender: send every collected
// token (no data -- ACK) to the current starver, i.e. the smallest
// requestor id in the persistent table.  No-op when no tokens are held.
969 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
970 // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
971 if (getCacheEntry(address).Tokens > 0) {
972 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
973 out_msg.Address := address;
974 out_msg.Type := CoherenceResponseType:ACK;
975 out_msg.Sender := machineID;
976 out_msg.SenderMachine := MachineType:L1Cache;
977 out_msg.Destination.add(persistentTable.findSmallest(address));
978 assert(getCacheEntry(address).Tokens >= 1);
979 out_msg.Tokens := getCacheEntry(address).Tokens;
980 out_msg.MessageSize := MessageSizeType:Response_Control;
981 }
982 }
983 getCacheEntry(address).Tokens := 0;
984 }
985
// Persistent-request variant of owner surrender: send data, dirty bit,
// and all tokens (DATA_OWNER) to the starver chosen by the persistent
// table.  Caller must hold at least one token (asserted).
986 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
987 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
988 assert(getCacheEntry(address).Tokens > 0);
989 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
990 out_msg.Address := address;
991 out_msg.Type := CoherenceResponseType:DATA_OWNER;
992 out_msg.Sender := machineID;
993 out_msg.SenderMachine := MachineType:L1Cache;
994 out_msg.Destination.add(persistentTable.findSmallest(address));
995 assert(getCacheEntry(address).Tokens >= 1);
996 out_msg.Tokens := getCacheEntry(address).Tokens;
997 out_msg.DataBlk := getCacheEntry(address).DataBlk;
998 out_msg.Dirty := getCacheEntry(address).Dirty;
999 out_msg.MessageSize := MessageSizeType:Response_Data;
1000 }
1001 getCacheEntry(address).Tokens := 0;
1002 }
1003
// Persistent GETS handling from a sharer: send an ACK carrying all
// tokens except the ones we keep -- keep N_tokens() if we hold more
// than that, otherwise keep exactly one ("N or one" in the name).
// Sends nothing when only a single token is held.
1004 action(f_sendAckWithAllButNorOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
1005 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1006 assert(getCacheEntry(address).Tokens > 0);
1007 if (getCacheEntry(address).Tokens > 1) {
1008 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
1009 out_msg.Address := address;
1010 out_msg.Type := CoherenceResponseType:ACK;
1011 out_msg.Sender := machineID;
1012 out_msg.SenderMachine := MachineType:L1Cache;
1013 out_msg.Destination.add(persistentTable.findSmallest(address));
1014 assert(getCacheEntry(address).Tokens >= 1);
1015 if (getCacheEntry(address).Tokens > N_tokens()) {
1016 out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens();
1017 } else {
1018 out_msg.Tokens := getCacheEntry(address).Tokens - 1;
1019 }
1020 out_msg.MessageSize := MessageSizeType:Response_Control;
1021 }
1022 }
// Retained-token count must mirror the sent count chosen above.
1023 if (getCacheEntry(address).Tokens > N_tokens()) {
1024 getCacheEntry(address).Tokens := N_tokens();
1025 } else {
1026 getCacheEntry(address).Tokens := 1;
1027 }
1028 }
1029
// Persistent GETS handling from the owner: send DATA_OWNER with all
// tokens except the ones we keep -- keep N_tokens() if we hold more
// than that, otherwise keep exactly one ("N or one" in the name).
// Sends nothing when only a single token is held.
// NOTE: fixed desc typo "out tokens" -> "our tokens".
1030 action(ff_sendDataWithAllButNorOneTokens, "\f", desc="Send data and our tokens but one to starver") {
1031 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1032 assert(getCacheEntry(address).Tokens > 0);
1033 if (getCacheEntry(address).Tokens > 1) {
1034 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
1035 out_msg.Address := address;
1036 out_msg.Type := CoherenceResponseType:DATA_OWNER;
1037 out_msg.Sender := machineID;
1038 out_msg.SenderMachine := MachineType:L1Cache;
1039 out_msg.Destination.add(persistentTable.findSmallest(address));
1040 assert(getCacheEntry(address).Tokens >= 1);
1041 if (getCacheEntry(address).Tokens > N_tokens()) {
1042 out_msg.Tokens := getCacheEntry(address).Tokens - N_tokens();
1043 } else {
1044 out_msg.Tokens := getCacheEntry(address).Tokens - 1;
1045 }
1046 out_msg.DataBlk := getCacheEntry(address).DataBlk;
1047 out_msg.Dirty := getCacheEntry(address).Dirty;
1048 out_msg.MessageSize := MessageSizeType:Response_Data;
1049 }
1050 if (getCacheEntry(address).Tokens > N_tokens()) {
1051 getCacheEntry(address).Tokens := N_tokens();
1052 } else {
1053 getCacheEntry(address).Tokens := 1;
1054 }
1055 }
1056 }
1057
// While this block is locked by a persistent request, redirect any
// response we receive to the starver instead of keeping it; tokens must
// keep moving toward the persistent requestor.
1058 action(g_bounceResponseToStarver, "g", desc="Redirect response to starving processor") {
1059 // assert(persistentTable.isLocked(address));
1060
1061 peek(responseNetwork_in, ResponseMsg) {
1062 // assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
1063 // FIXME, should use a 3rd vnet in some cases
1064 enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
1065 out_msg.Address := address;
1066 out_msg.Type := in_msg.Type;
1067 out_msg.Sender := machineID;
1068 out_msg.SenderMachine := MachineType:L1Cache;
1069 out_msg.Destination.add(persistentTable.findSmallest(address));
// Message contents are forwarded verbatim -- nothing is absorbed here.
1070 out_msg.Tokens := in_msg.Tokens;
1071 out_msg.DataBlk := in_msg.DataBlk;
1072 out_msg.Dirty := in_msg.Dirty;
1073 out_msg.MessageSize := in_msg.MessageSize;
1074 }
1075 }
1076 }
1077
1078
// Sequencer callbacks.  The "_" variants report a hit serviced locally
// (GenericMachineType:L1Cache); the "x"/"xx" variants report an
// external hit and derive the servicing machine type from the response
// message's sender.  Store callbacks also set the block's Dirty bit.
1079 action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
1080 DEBUG_EXPR(address);
1081 DEBUG_EXPR(getCacheEntry(address).DataBlk);
1082 sequencer.readCallback(address, getCacheEntry(address).DataBlk, GenericMachineType:L1Cache, PrefetchBit:No);
1083 }
1084
1085 action(x_external_load_hit, "x", desc="Notify sequencer the load completed.") {
1086 DEBUG_EXPR(address);
1087 DEBUG_EXPR(getCacheEntry(address).DataBlk);
1088 peek(responseNetwork_in, ResponseMsg) {
1089
1090 sequencer.readCallback(address, getCacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
1091 }
1092 }
1093
1094 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
1095 DEBUG_EXPR(address);
1096 DEBUG_EXPR(getCacheEntry(address).DataBlk);
// writeCallback applies the store data into the cache block; the block
// becomes dirty as a result.
1097 sequencer.writeCallback(address, getCacheEntry(address).DataBlk, GenericMachineType:L1Cache, PrefetchBit:No);
1098 getCacheEntry(address).Dirty := true;
1099 DEBUG_EXPR(getCacheEntry(address).DataBlk);
1100 }
1101
1102 action(xx_external_store_hit, "\x", desc="Notify sequencer that store completed.") {
1103 DEBUG_EXPR(address);
1104 DEBUG_EXPR(getCacheEntry(address).DataBlk);
1105 peek(responseNetwork_in, ResponseMsg) {
1106 sequencer.writeCallback(address, getCacheEntry(address).DataBlk, getNondirectHitMachType(in_msg.Address, in_msg.Sender), PrefetchBit:No);
1107 }
1108 getCacheEntry(address).Dirty := true;
1109 DEBUG_EXPR(getCacheEntry(address).DataBlk);
1110 }
1111
// Allocate a Transaction Buffer Entry for an outstanding miss and seed
// it from the pending CPU request (PC, access type/mode, prefetch bit).
// IssueCount starts at 0 and IssueTime records when the miss began, for
// the retry/latency accounting done in s_deallocateTBE.
1112 action(i_allocateTBE, "i", desc="Allocate TBE") {
1113 check_allocate(L1_TBEs);
1114 L1_TBEs.allocate(address);
1115 L1_TBEs[address].IssueCount := 0;
1116 peek(mandatoryQueue_in, CacheMsg) {
1117 L1_TBEs[address].PC := in_msg.ProgramCounter;
1118 L1_TBEs[address].AccessType := cache_request_type_to_access_type(in_msg.Type);
1119 L1_TBEs[address].Prefetch := in_msg.Prefetch;
1120 L1_TBEs[address].AccessMode := in_msg.AccessMode;
1121 }
1122 L1_TBEs[address].IssueTime := get_time();
1123 }
1124
1125
// Cancel the pending reissue timer, if one is armed for this address.
1126 action(j_unsetReissueTimer, "j", desc="Unset reissue timer.") {
1127 if (reissueTimerTable.isSet(address)) {
1128 reissueTimerTable.unset(address);
1129 }
1130 }
1131
// Cancel the use timer (no isSet guard -- assumes it is always armed).
1132 action(jj_unsetUseTimer, "\j", desc="Unset use timer.") {
1133 useTimerTable.unset(address);
1134 }
1135
1136
1137
// Queue-pop actions: each retires the head message of one input port
// after the transition's other actions have peeked at it.
1138 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
1139 mandatoryQueue_in.dequeue();
1140 }
1141
1142 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
1143 persistentNetwork_in.dequeue();
1144 }
1145
1146 action(m_popRequestQueue, "m", desc="Pop request queue.") {
1147 requestNetwork_in.dequeue();
1148 }
1149
1150 action(n_popResponseQueue, "n", desc="Pop response queue") {
1151 responseNetwork_in.dequeue();
1152 }
1153
// Arm the M_W/MM_W lockout window after acquiring all tokens.
// NOTE(review): 50 is a magic cycle count -- consider a named constant.
1154 action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
1155 useTimerTable.set(address, 50);
1156 }
1157
// Tell the local L2 bank this L1 no longer holds any tokens for the
// block (INV with Tokens = 0), so the L2's sharing state stays accurate.
1158 action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
1159 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
1160 out_msg.Address := address;
1161 out_msg.Type := CoherenceResponseType:INV;
1162 out_msg.Tokens := 0;
1163 out_msg.Sender := machineID;
1164 out_msg.SenderMachine := MachineType:L1Cache;
1165 out_msg.DestMachine := MachineType:L2Cache;
1166 out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
1167 out_msg.MessageSize := MessageSizeType:Response_Control;
1168 }
1169 }
1170
1171
// Fold an incoming response's tokens into the local count, and make the
// block dirty if the response carried a dirty copy (dirty is sticky).
1172 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
1173 peek(responseNetwork_in, ResponseMsg) {
1174 assert(in_msg.Tokens != 0);
1175 DEBUG_EXPR("MRM_DEBUG L1 received tokens");
1176 DEBUG_EXPR(in_msg.Address);
1177 DEBUG_EXPR(in_msg.Tokens);
1178 getCacheEntry(address).Tokens := getCacheEntry(address).Tokens + in_msg.Tokens;
1179 DEBUG_EXPR(getCacheEntry(address).Tokens);
1180
1181 if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
1182 getCacheEntry(address).Dirty := true;
1183 }
1184 }
1185 }
1186
// Retire the TBE for a completed miss.  If the request had escalated to
// a persistent request, broadcast DEACTIVATE_PERSISTENT to all L1s, the
// pertinent L2 banks, and the directory, and clear the starving flag.
// Also feeds the dynamic-timeout latency estimator (only for requests
// answered externally on the first/second issue) and the retry profiler.
1187 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
1188
1189 if (L1_TBEs[address].WentPersistent) {
1190 // assert(starving == true);
1191 outstandingRequests := outstandingRequests - 1;
1192 enqueue(persistentNetwork_out, PersistentMsg, latency="L1_REQUEST_LATENCY") {
1193 out_msg.Address := address;
1194 out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
1195 out_msg.Requestor := machineID;
1196 out_msg.RequestorMachine := MachineType:L1Cache;
1197 out_msg.Destination.broadcast(MachineType:L1Cache);
1198 out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
1199 out_msg.Destination.add(map_Address_to_Directory(address));
1200 out_msg.MessageSize := MessageSizeType:Persistent_Control;
1201 }
1202 starving := false;
1203 }
1204
1205 // Update average latency
1206 if (L1_TBEs[address].IssueCount <= 1) {
1207 if (L1_TBEs[address].ExternalResponse == true) {
1208 updateAverageLatencyEstimate(time_to_int(get_time()) - time_to_int(L1_TBEs[address].IssueTime));
1209 }
1210 }
1211
1212 // Profile
1213 //if (L1_TBEs[address].WentPersistent) {
1214 // profile_token_retry(address, L1_TBEs[address].AccessType, 2);
1215 //}
1216 //else {
1217 // profile_token_retry(address, L1_TBEs[address].AccessType, 1);
1218 //}
1219
1220 profile_token_retry(address, L1_TBEs[address].AccessType, L1_TBEs[address].IssueCount);
1221 L1_TBEs.deallocate(address);
1222 }
1223
// Transient-GETX variant of token surrender: give any collected tokens
// (ACK, no data) to the transient requestor to avoid livelock while we
// are still gathering tokens ourselves.
1224 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
1225 if (getCacheEntry(address).Tokens > 0) {
1226 peek(requestNetwork_in, RequestMsg) {
1227 enqueue(responseNetwork_out, ResponseMsg, latency="L1_RESPONSE_LATENCY") {
1228 out_msg.Address := address;
1229 out_msg.Type := CoherenceResponseType:ACK;
1230 out_msg.Sender := machineID;
1231 out_msg.SenderMachine := MachineType:L1Cache;
1232 out_msg.Destination.add(in_msg.Requestor);
1233 assert(getCacheEntry(address).Tokens >= 1);
1234 out_msg.Tokens := getCacheEntry(address).Tokens;
1235 out_msg.MessageSize := MessageSizeType:Response_Control;
1236 }
1237 }
1238 }
1239 getCacheEntry(address).Tokens := 0;
1240 }
1241
// Install response data into the cache block; the dirty bit is sticky
// (only ever set here, never cleared).
1242 action(u_writeDataToCache, "u", desc="Write data to cache") {
1243 peek(responseNetwork_in, ResponseMsg) {
1244 getCacheEntry(address).DataBlk := in_msg.DataBlk;
1245 if (getCacheEntry(address).Dirty == false && in_msg.Dirty) {
1246 getCacheEntry(address).Dirty := in_msg.Dirty;
1247 }
1248
1249 }
1250 }
1251
// Free the cache frame for this address from whichever L1 array (D then
// I) currently holds the tag.
1252 action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
1253 if (L1DcacheMemory.isTagPresent(address)) {
1254 L1DcacheMemory.deallocate(address);
1255 } else {
1256 L1IcacheMemory.deallocate(address);
1257 }
1258 }
1259
// Allocate a D-cache frame if the tag is not already present.
1260 action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
1261 if (L1DcacheMemory.isTagPresent(address) == false) {
1262 L1DcacheMemory.allocate(address);
1263 }
1264 }
1265
// Allocate an I-cache frame if the tag is not already present.
1266 action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
1267 if (L1IcacheMemory.isTagPresent(address) == false) {
1268 L1IcacheMemory.allocate(address);
1269 }
1270 }
1271
// Demand-miss profiling hook; the actual profile call is commented out.
1272 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
1273 peek(mandatoryQueue_in, CacheMsg) {
1274 // profile_miss(in_msg, id);
1275 }
1276 }
1277
// Sanity check used when we already hold the data: an arriving copy
// must match ours bit-for-bit.
1278 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
1279 peek(responseNetwork_in, ResponseMsg) {
1280 assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
1281 }
1282 }
1283
1284
// Deliberate no-op: leave the triggering message at the queue head.
1285 action(z_stall, "z", desc="Stall") {
1286
1287 }
1288
// Rotate the head request to the queue tail so later requests can make
// progress while this one waits.
1289 action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
1290 mandatoryQueue_in.recycle();
1291 }
1292
1293 //*****************************************************
1294 // TRANSITIONS
1295 //*****************************************************
1296
1297 // Transitions for Load/Store/L2_Replacement from transient states
// A miss is already in flight for these states, so new CPU requests are
// recycled to the back of the mandatory queue rather than serviced.
1298 transition({IM, SM, OM, IS, IM_L, IS_L, I_L, S_L, SM_L, M_W, MM_W}, L1_Replacement) {
1299 zz_recycleMandatoryQueue;
1300 }
1301
1302 transition({IM, SM, OM, IS, IM_L, IS_L, SM_L}, Store) {
1303 zz_recycleMandatoryQueue;
1304 }
1305
1306 transition({IM, IS, IM_L, IS_L}, {Load, Ifetch}) {
1307 zz_recycleMandatoryQueue;
1308 }
1309
1310
1311 // Lockdowns
// Our own persistent (un)lock messages need no state change.
1312 transition({NP, I, S, O, M, MM, M_W, MM_W, IM, SM, OM, IS}, Own_Lock_or_Unlock) {
1313 l_popPersistentQueue;
1314 }
1315
1316 // Transitions from NP
1317 transition(NP, Load, IS) {
1318 ii_allocateL1DCacheBlock;
1319 i_allocateTBE;
1320 a_issueReadRequest;
1321 uu_profileMiss;
1322 k_popMandatoryQueue;
1323 }
1324
1325 transition(NP, Ifetch, IS) {
1326 pp_allocateL1ICacheBlock;
1327 i_allocateTBE;
1328 a_issueReadRequest;
1329 uu_profileMiss;
1330 k_popMandatoryQueue;
1331 }
1332
1333 transition(NP, Store, IM) {
1334 ii_allocateL1DCacheBlock;
1335 i_allocateTBE;
1336 b_issueWriteRequest;
1337 uu_profileMiss;
1338 k_popMandatoryQueue;
1339 }
1340
// Responses arriving for an untracked block: bounce them to memory so
// the tokens are not lost.
1341 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
1342 bb_bounceResponse;
1343 n_popResponseQueue;
1344 }
1345
1346 transition(NP, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) {
1347 m_popRequestQueue;
1348 }
1349
// A persistent lock on an untracked block still locks us out (I_L).
1350 transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
1351 l_popPersistentQueue;
1352 }
1353
1354 // Transitions from Idle
// I differs from NP in that the cache frame exists and may hold stray
// tokens, so responses are absorbed rather than bounced.
1355 transition(I, Load, IS) {
1356 i_allocateTBE;
1357 a_issueReadRequest;
1358 uu_profileMiss;
1359 k_popMandatoryQueue;
1360 }
1361
1362 transition(I, Ifetch, IS) {
1363 i_allocateTBE;
1364 a_issueReadRequest;
1365 uu_profileMiss;
1366 k_popMandatoryQueue;
1367 }
1368
1369 transition(I, Store, IM) {
1370 i_allocateTBE;
1371 b_issueWriteRequest;
1372 uu_profileMiss;
1373 k_popMandatoryQueue;
1374 }
1375
1376 transition(I, L1_Replacement) {
1377 cc_sharedReplacement;
1378 gg_deallocateL1CacheBlock;
1379 }
1380
// Hand any stray tokens to a GETX requestor to help it complete.
1381 transition(I, {Transient_GETX, Transient_Local_GETX}) {
1382 t_sendAckWithCollectedTokens;
1383 m_popRequestQueue;
1384 }
1385
1386 transition(I, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
1387 m_popRequestQueue;
1388 }
1389
1390 transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
1391 e_sendAckWithCollectedTokens;
1392 l_popPersistentQueue;
1393 }
1394
1395 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1396 l_popPersistentQueue;
1397 }
1398
1399 transition(I, Ack) {
1400 q_updateTokensFromResponse;
1401 n_popResponseQueue;
1402 }
1403
// Unsolicited data promotes I toward S/O/M according to what arrived.
1404 transition(I, Data_Shared, S) {
1405 u_writeDataToCache;
1406 q_updateTokensFromResponse;
1407 n_popResponseQueue;
1408 }
1409
1410 transition(I, Data_Owner, O) {
1411 u_writeDataToCache;
1412 q_updateTokensFromResponse;
1413 n_popResponseQueue;
1414 }
1415
1416 transition(I, Data_All_Tokens, M) {
1417 u_writeDataToCache;
1418 q_updateTokensFromResponse;
1419 n_popResponseQueue;
1420 }
1421
1422 // Transitions from Shared
1423 transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
1424 h_load_hit;
1425 k_popMandatoryQueue;
1426 }
1427
1428 transition(S, Store, SM) {
1429 i_allocateTBE;
1430 b_issueWriteRequest;
1431 uu_profileMiss;
1432 k_popMandatoryQueue;
1433 }
1434
1435 transition(S, L1_Replacement, I) {
1436 cc_sharedReplacement; // Only needed in some cases
1437 gg_deallocateL1CacheBlock;
1438 }
1439
1440 transition(S, {Transient_GETX, Transient_Local_GETX}, I) {
1441 t_sendAckWithCollectedTokens;
1442 p_informL2AboutTokenLoss;
1443 m_popRequestQueue;
1444 }
1445
1446 // only owner responds to non-local requests
1447 transition(S, Transient_GETS) {
1448 m_popRequestQueue;
1449 }
1450
// Local GETS: a sharer may hand over one token plus data on-chip.
1451 transition(S, Transient_Local_GETS) {
1452 d_sendDataWithToken;
1453 m_popRequestQueue;
1454 }
1455
1456 transition(S, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
1457 m_popRequestQueue;
1458 }
1459
1460 transition({S, S_L}, Persistent_GETX, I_L) {
1461 e_sendAckWithCollectedTokens;
1462 p_informL2AboutTokenLoss;
1463 l_popPersistentQueue;
1464 }
1465
// Persistent GETS: keep one (or N) token(s), surrender the rest.
1466 transition(S, Persistent_GETS, S_L) {
1467 f_sendAckWithAllButNorOneTokens;
1468 l_popPersistentQueue;
1469 }
1470
1471 transition(S_L, Persistent_GETS) {
1472 l_popPersistentQueue;
1473 }
1474
1475 transition(S, Ack) {
1476 q_updateTokensFromResponse;
1477 n_popResponseQueue;
1478 }
1479
// We already hold the data in S, so arriving data must match ours.
1480 transition(S, Data_Shared) {
1481 w_assertIncomingDataAndCacheDataMatch;
1482 q_updateTokensFromResponse;
1483 n_popResponseQueue;
1484 }
1485
1486 transition(S, Data_Owner, O) {
1487 w_assertIncomingDataAndCacheDataMatch;
1488 q_updateTokensFromResponse;
1489 n_popResponseQueue;
1490 }
1491
1492 transition(S, Data_All_Tokens, M) {
1493 w_assertIncomingDataAndCacheDataMatch;
1494 q_updateTokensFromResponse;
1495 n_popResponseQueue;
1496 }
1497
1498 // Transitions from Owned
1499 transition({O, OM}, {Load, Ifetch}) {
1500 h_load_hit;
1501 k_popMandatoryQueue;
1502 }
1503
1504 transition(O, Store, OM) {
1505 i_allocateTBE;
1506 b_issueWriteRequest;
1507 uu_profileMiss;
1508 k_popMandatoryQueue;
1509 }
1510
// Owner eviction must write back data plus all tokens (c, not cc).
1511 transition(O, L1_Replacement, I) {
1512 c_ownedReplacement;
1513 gg_deallocateL1CacheBlock;
1514 }
1515
1516 transition(O, {Transient_GETX, Transient_Local_GETX}, I) {
1517 dd_sendDataWithAllTokens;
1518 p_informL2AboutTokenLoss;
1519 m_popRequestQueue;
1520 }
1521
1522 transition(O, Persistent_GETX, I_L) {
1523 ee_sendDataWithAllTokens;
1524 p_informL2AboutTokenLoss;
1525 l_popPersistentQueue;
1526 }
1527
1528 transition(O, Persistent_GETS, S_L) {
1529 ff_sendDataWithAllButNorOneTokens;
1530 l_popPersistentQueue;
1531 }
1532
// As the owner we answer GETS requests (local or not) with one token.
1533 transition(O, Transient_GETS) {
1534 d_sendDataWithToken;
1535 m_popRequestQueue;
1536 }
1537
1538 transition(O, Transient_Local_GETS) {
1539 d_sendDataWithToken;
1540 m_popRequestQueue;
1541 }
1542
1543 // ran out of tokens, wait for it to go persistent
1544 transition(O, {Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token}) {
1545 m_popRequestQueue;
1546 }
1547
1548 transition(O, Ack) {
1549 q_updateTokensFromResponse;
1550 n_popResponseQueue;
1551 }
1552
// Collecting the final token while owning data upgrades O -> M.
1553 transition(O, Ack_All_Tokens, M) {
1554 q_updateTokensFromResponse;
1555 n_popResponseQueue;
1556 }
1557
1558 transition(O, Data_Shared) {
1559 w_assertIncomingDataAndCacheDataMatch;
1560 q_updateTokensFromResponse;
1561 n_popResponseQueue;
1562 }
1563
1564 transition(O, Data_All_Tokens, M) {
1565 w_assertIncomingDataAndCacheDataMatch;
1566 q_updateTokensFromResponse;
1567 n_popResponseQueue;
1568 }
1569
1570 // Transitions from Modified
// MM is the migratory-modified state; MM_W adds the use-timeout lockout
// window during which external requests are ignored.
1571 transition({MM, MM_W}, {Load, Ifetch}) {
1572 h_load_hit;
1573 k_popMandatoryQueue;
1574 }
1575
1576 transition({MM, MM_W}, Store) {
1577 hh_store_hit;
1578 k_popMandatoryQueue;
1579 }
1580
1581 transition(MM, L1_Replacement, I) {
1582 c_ownedReplacement;
1583 gg_deallocateL1CacheBlock;
1584 }
1585
// Migratory optimization: even a GETS takes data and ALL tokens.
1586 transition(MM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}, I) {
1587 dd_sendDataWithAllTokens;
1588 p_informL2AboutTokenLoss;
1589 m_popRequestQueue;
1590 }
1591
1592 transition({MM_W}, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
1593 m_popRequestQueue;
1594 }
1595
1596 // Implement the migratory sharing optimization, even for persistent requests
1597 transition(MM, {Persistent_GETX, Persistent_GETS}, I_L) {
1598 ee_sendDataWithAllTokens;
1599 p_informL2AboutTokenLoss;
1600 l_popPersistentQueue;
1601 }
1602
1603 // ignore persistent requests in lockout period
1604 transition(MM_W, {Persistent_GETX, Persistent_GETS}) {
1605 l_popPersistentQueue;
1606 }
1607
1608
// Lockout window expired with no starver waiting: settle into MM.
1609 transition(MM_W, Use_TimeoutNoStarvers, MM) {
1610 s_deallocateTBE;
1611 jj_unsetUseTimer;
1612 }
1613
1614 // Transitions from Dirty Exclusive
1615 transition({M, M_W}, {Load, Ifetch}) {
1616 h_load_hit;
1617 k_popMandatoryQueue;
1618 }
1619
// A store to exclusive data moves to the migratory-modified state.
1620 transition(M, Store, MM) {
1621 hh_store_hit;
1622 k_popMandatoryQueue;
1623 }
1624
1625 transition(M_W, Store, MM_W) {
1626 hh_store_hit;
1627 k_popMandatoryQueue;
1628 }
1629
1630 transition(M, L1_Replacement, I) {
1631 c_ownedReplacement;
1632 gg_deallocateL1CacheBlock;
1633 }
1634
1635 transition(M, {Transient_GETX, Transient_Local_GETX}, I) {
1636 dd_sendDataWithAllTokens;
1637 p_informL2AboutTokenLoss;
1638 m_popRequestQueue;
1639 }
1640
// GETS demotes M to O; local requestors get exactly one token, remote
// ones get up to N tokens if available.
1641 transition(M, Transient_Local_GETS, O) {
1642 d_sendDataWithToken;
1643 m_popRequestQueue;
1644 }
1645
1646 transition(M, Transient_GETS, O) {
1647 d_sendDataWithNTokenIfAvail;
1648 m_popRequestQueue;
1649 }
1650
1651 transition(M_W, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_Local_GETS}) { // Ignore the request
1652 m_popRequestQueue;
1653 }
1654
1655 transition(M, Persistent_GETX, I_L) {
1656 ee_sendDataWithAllTokens;
1657 p_informL2AboutTokenLoss;
1658 l_popPersistentQueue;
1659 }
1660
1661 transition(M, Persistent_GETS, S_L) {
1662 ff_sendDataWithAllButNorOneTokens;
1663 l_popPersistentQueue;
1664 }
1665
1666 // ignore persistent requests in lockout period
1667 transition(M_W, {Persistent_GETX, Persistent_GETS}) {
1668 l_popPersistentQueue;
1669 }
1670
// Use-timeout expiry: act on whichever persistent request is pending.
1671 transition(M_W, Use_TimeoutStarverS, S_L) {
1672 s_deallocateTBE;
1673 ff_sendDataWithAllButNorOneTokens;
1674 jj_unsetUseTimer;
1675 }
1676
1677 // someone unlocked during timeout
1678 transition(M_W, Use_TimeoutNoStarvers, M) {
1679 s_deallocateTBE;
1680 jj_unsetUseTimer;
1681 }
1682
1683 transition(M_W, Use_TimeoutStarverX, I_L) {
1684 s_deallocateTBE;
1685 ee_sendDataWithAllTokens;
1686 p_informL2AboutTokenLoss;
1687 jj_unsetUseTimer;
1688 }
1689
1690
1691
1692 // migratory
1693 transition(MM_W, {Use_TimeoutStarverX, Use_TimeoutStarverS}, I_L) {
1694 s_deallocateTBE;
1695 ee_sendDataWithAllTokens;
1696 p_informL2AboutTokenLoss;
1697 jj_unsetUseTimer;
1698
1699 }
1700
1701
1702 // Transient_GETX and Transient_GETS in transient states
1703 transition(OM, {Transient_GETX, Transient_Local_GETX, Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
1704 m_popRequestQueue; // Even if we have the data, we can pretend we don't have it yet.
1705 }
1706
// While fetching, surrender any tokens gathered so far to GETX
// requestors (livelock avoidance); ignore GETS.
1707 transition(IS, {Transient_GETX, Transient_Local_GETX}) {
1708 t_sendAckWithCollectedTokens;
1709 m_popRequestQueue;
1710 }
1711
1712 transition(IS, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
1713 m_popRequestQueue;
1714 }
1715
// Persistent requests move the transient state to its locked twin.
1716 transition(IS, {Persistent_GETX, Persistent_GETS}, IS_L) {
1717 e_sendAckWithCollectedTokens;
1718 l_popPersistentQueue;
1719 }
1720
1721 transition(IS_L, {Persistent_GETX, Persistent_GETS}) {
1722 l_popPersistentQueue;
1723 }
1724
1725 transition(IM, {Persistent_GETX, Persistent_GETS}, IM_L) {
1726 e_sendAckWithCollectedTokens;
1727 l_popPersistentQueue;
1728 }
1729
1730 transition(IM_L, {Persistent_GETX, Persistent_GETS}) {
1731 l_popPersistentQueue;
1732 }
1733
1734 transition({SM, SM_L}, Persistent_GETX, IM_L) {
1735 e_sendAckWithCollectedTokens;
1736 l_popPersistentQueue;
1737 }
1738
1739 transition(SM, Persistent_GETS, SM_L) {
1740 f_sendAckWithAllButNorOneTokens;
1741 l_popPersistentQueue;
1742 }
1743
1744 transition(SM_L, Persistent_GETS) {
1745 l_popPersistentQueue;
1746 }
1747
1748 transition(OM, Persistent_GETX, IM_L) {
1749 ee_sendDataWithAllTokens;
1750 l_popPersistentQueue;
1751 }
1752
1753 transition(OM, Persistent_GETS, SM_L) {
1754 ff_sendDataWithAllButNorOneTokens;
1755 l_popPersistentQueue;
1756 }
1757
1758 // Transitions from IM/SM
1759
1760 transition({IM, SM}, Ack) {
1761 q_updateTokensFromResponse;
1762 n_popResponseQueue;
1763 }
1764
1765 transition(IM, Data_Shared, SM) {
1766 u_writeDataToCache;
1767 q_updateTokensFromResponse;
1768 n_popResponseQueue;
1769 }
1770
1771 transition(IM, Data_Owner, OM) {
1772 u_writeDataToCache;
1773 q_updateTokensFromResponse;
1774 n_popResponseQueue;
1775 }
1776
// All tokens collected: complete the store and arm the lockout window.
1777 transition(IM, Data_All_Tokens, MM_W) {
1778 u_writeDataToCache;
1779 q_updateTokensFromResponse;
1780 xx_external_store_hit;
1781 o_scheduleUseTimeout;
1782 j_unsetReissueTimer;
1783 n_popResponseQueue;
1784 }
1785
1786 transition(SM, Data_Shared) {
1787 w_assertIncomingDataAndCacheDataMatch;
1788 q_updateTokensFromResponse;
1789 n_popResponseQueue;
1790 }
1791
1792 transition(SM, Data_Owner, OM) {
1793 w_assertIncomingDataAndCacheDataMatch;
1794 q_updateTokensFromResponse;
1795 n_popResponseQueue;
1796 }
1797
1798 transition(SM, Data_All_Tokens, MM_W) {
1799 w_assertIncomingDataAndCacheDataMatch;
1800 q_updateTokensFromResponse;
1801 xx_external_store_hit;
1802 o_scheduleUseTimeout;
1803 j_unsetReissueTimer;
1804 n_popResponseQueue;
1805 }
1806
1807 transition({IM, SM}, {Transient_GETX, Transient_Local_GETX}, IM) { // We don't have the data yet, but we might have collected some tokens.  We give them up here to avoid livelock
1808 t_sendAckWithCollectedTokens;
1809 m_popRequestQueue;
1810 }
1811
1812 transition({IM, SM}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS}) {
1813 m_popRequestQueue;
1814 }
1815
// Reissue timer fired: retry the write request.
1816 transition({IM, SM}, Request_Timeout) {
1817 j_unsetReissueTimer;
1818 b_issueWriteRequest;
1819 }
1820
1821 // Transitions from OM
1822
1823 transition(OM, Ack) {
1824 q_updateTokensFromResponse;
1825 n_popResponseQueue;
1826 }
1827
// Final tokens arrive (with or without redundant data): finish the
// store and enter the migratory-modified lockout window.
1828 transition(OM, Ack_All_Tokens, MM_W) {
1829 q_updateTokensFromResponse;
1830 xx_external_store_hit;
1831 o_scheduleUseTimeout;
1832 j_unsetReissueTimer;
1833 n_popResponseQueue;
1834 }
1835
1836 transition(OM, Data_Shared) {
1837 w_assertIncomingDataAndCacheDataMatch;
1838 q_updateTokensFromResponse;
1839 n_popResponseQueue;
1840 }
1841
1842 transition(OM, Data_All_Tokens, MM_W) {
1843 w_assertIncomingDataAndCacheDataMatch;
1844 q_updateTokensFromResponse;
1845 xx_external_store_hit;
1846 o_scheduleUseTimeout;
1847 j_unsetReissueTimer;
1848 n_popResponseQueue;
1849 }
1850
1851 transition(OM, Request_Timeout) {
1852 j_unsetReissueTimer;
1853 b_issueWriteRequest;
1854 }
1855
1856 // Transitions from IS
1857
1858 transition(IS, Ack) {
1859 q_updateTokensFromResponse;
1860 n_popResponseQueue;
1861 }
1862
// Read miss completes at the level the arriving data dictates.
1863 transition(IS, Data_Shared, S) {
1864 u_writeDataToCache;
1865 q_updateTokensFromResponse;
1866 x_external_load_hit;
1867 s_deallocateTBE;
1868 j_unsetReissueTimer;
1869 n_popResponseQueue;
1870 }
1871
1872 transition(IS, Data_Owner, O) {
1873 u_writeDataToCache;
1874 q_updateTokensFromResponse;
1875 x_external_load_hit;
1876 s_deallocateTBE;
1877 j_unsetReissueTimer;
1878 n_popResponseQueue;
1879 }
1880
// All tokens on a read: exclusive with a use-timeout (M_W); note the
// TBE is kept until the timeout handler runs s_deallocateTBE.
1881 transition(IS, Data_All_Tokens, M_W) {
1882 u_writeDataToCache;
1883 q_updateTokensFromResponse;
1884 x_external_load_hit;
1885 o_scheduleUseTimeout;
1886 j_unsetReissueTimer;
1887 n_popResponseQueue;
1888 }
1889
1890 transition(IS, Request_Timeout) {
1891 j_unsetReissueTimer;
1892 a_issueReadRequest;
1893 }
1894
  // Transitions from I_L
  // (Invalid and "locked": another node holds a persistent request for
  //  this block.  Processor requests still allocate resources and issue;
  //  they complete once the lock clears or data is bounced our way.)

  // Load in locked-invalid: allocate a D-cache block and TBE, issue the
  // read, and wait in the locked transient state IS_L.
  transition(I_L, Load, IS_L) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueReadRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  // Instruction fetch: same as Load but fills the I-cache.
  transition(I_L, Ifetch, IS_L) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueReadRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  // Store in locked-invalid: issue the write and wait in IM_L.
  transition(I_L, Store, IM_L) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueWriteRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }
1920
1921
  // Transitions from S_L
  // (Shared but locked by another node's persistent request.)

  // Store upgrade while locked: the block is already cached (no cache
  // allocation needed); issue the write and wait in SM_L.
  transition(S_L, Store, SM_L) {
    i_allocateTBE;
    b_issueWriteRequest;
    uu_profileMiss;
    k_popMandatoryQueue;
  }
1930
  // Other transitions from *_L states

  // While locked, transient (non-persistent) requests from other nodes
  // are simply ignored -- the persistent request holder has priority.
  transition({I_L, IM_L, IS_L, S_L, SM_L}, {Transient_GETS, Transient_GETS_Last_Token, Transient_Local_GETS_Last_Token, Transient_Local_GETS, Transient_GETX, Transient_Local_GETX}) {
    m_popRequestQueue;
  }

  // Token Acks received while locked are forwarded to the starving
  // (persistent-request) node rather than kept.
  transition({I_L, IM_L, IS_L, S_L, SM_L}, Ack) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  // Likewise bounce data responses to the starver.  IS_L is excluded:
  // it opportunistically completes its load on incoming data instead
  // (see the Data_Shared/Data_Owner IS_L transitions further down).
  transition({I_L, IM_L, S_L, SM_L}, {Data_Shared, Data_Owner}) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  // All-token data in I_L/S_L (no memory operation pending that could
  // use it) is bounced as well.
  transition({I_L, S_L}, Data_All_Tokens) {
    g_bounceResponseToStarver;
    n_popResponseQueue;
  }

  // Timeouts while locked: reissue the read ...
  transition(IS_L, Request_Timeout) {
    j_unsetReissueTimer;
    a_issueReadRequest;
  }

  // ... or the write, as appropriate.
  transition({IM_L, SM_L}, Request_Timeout) {
    j_unsetReissueTimer;
    b_issueWriteRequest;
  }
1961
  // Opportunisticly Complete the memory operation in the following
  // cases. Note: these transitions could just use
  // g_bounceResponseToStarver, but if we have the data and tokens, we
  // might as well complete the memory request while we have the
  // chance (and then immediately forward on the data)

  // Locked write gets data with all tokens: perform the store, enter
  // MM_W with the use timeout running.  (Forwarding to the starver is
  // deferred until the MM_W state resolves -- outside this view.)
  transition(IM_L, Data_All_Tokens, MM_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    j_unsetReissueTimer;
    o_scheduleUseTimeout;
    n_popResponseQueue;
  }

  // Locked upgrade gets data with all tokens: perform the store, then
  // immediately pass the data (and all but one token) on to the starver
  // and drop back to locked-shared.
  // NOTE(review): the action name "AllButNorOneTokens" reads like a typo
  // for "AllButOneTokens"; it is the action's declared name elsewhere in
  // this file, so it must match that declaration, not be "fixed" here.
  transition(SM_L, Data_All_Tokens, S_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    xx_external_store_hit;
    ff_sendDataWithAllButNorOneTokens;
    s_deallocateTBE;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  // Locked read gets shared data: satisfy the load, then hand the
  // collected tokens to the starver and inform the L2 we kept none.
  transition(IS_L, Data_Shared, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    s_deallocateTBE;
    e_sendAckWithCollectedTokens;
    p_informL2AboutTokenLoss;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  // Locked read gets owner data: satisfy the load, then forward the data
  // with every token (ownership included) to the starver.
  transition(IS_L, Data_Owner, I_L) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    ee_sendDataWithAllTokens;
    s_deallocateTBE;
    p_informL2AboutTokenLoss;
    j_unsetReissueTimer;
    n_popResponseQueue;
  }

  // Locked read gets data with all tokens: satisfy the load and enter
  // M_W with the use timeout scheduled (TBE retained, as in IS).
  transition(IS_L, Data_All_Tokens, M_W) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    x_external_load_hit;
    j_unsetReissueTimer;
    o_scheduleUseTimeout;
    n_popResponseQueue;
  }
2017
2018
  // Own_Lock_or_Unlock
  // (A persistent-request table update where this node is now the
  //  holder, or the lock is released: each locked state drops back to
  //  its unlocked counterpart with no other action.)

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition(IM_L, Own_Lock_or_Unlock, IM) {
    l_popPersistentQueue;
  }

  transition(IS_L, Own_Lock_or_Unlock, IS) {
    l_popPersistentQueue;
  }

  transition(SM_L, Own_Lock_or_Unlock, SM) {
    l_popPersistentQueue;
  }
2040 }
2041