Resurrection of the CMP token protocol in gem5
gem5.git: src/mem/protocol/MOESI_CMP_token-L2cache.sm
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L2Cache, "Token protocol")
 : int l2_request_latency,
   int l2_response_latency,
   int N_tokens,
   bool filtering_enabled
{
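  // Token-coherence recap (informal, as we read the protocol below): a cache
  // may write a block only while holding all max_tokens() tokens, and may
  // read it only while holding at least one token along with valid data.
  // Ownership (responsibility for supplying data) travels with DATA_OWNER
  // messages, and persistent requests are the starvation-avoidance fallback
  // when ordinary transient requests fail to collect enough tokens.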

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network

  // this L2 bank -> a local L1 || mod-directory
  MessageBuffer responseFromL2Cache, network="To", virtual_network="1", ordered="false";
  // this L2 bank -> mod-directory
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="3", ordered="false";
  // this L2 bank -> a local L1
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="4", ordered="false";


  // FROM the network to this local bank of L2 cache

  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="1", ordered="false";
  MessageBuffer persistentToL2Cache, network="From", virtual_network="2", ordered="true";
  // mod-directory -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="3", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="4", ordered="false";
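
  // Note that virtual network 2 is the only ordered one: persistent request
  // activations and deactivations must be observed in the same order by every
  // node, or the per-node persistent tables would disagree about which
  // processor currently holds the priority lock for an address.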

  // STATES
  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, desc="Not Present";
    I, desc="Idle";
    S, desc="Shared, not present in any local L1s";
    O, desc="Owned, not present in any L1s";
    M, desc="Modified, not present in any L1s";

    // Locked states
    I_L, "I^L", desc="Invalid, Locked";
    S_L, "S^L", desc="Shared, Locked";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request for our last token";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor for our last token";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 carrying all the tokens";
    Writeback_Owned, desc="Received a writeback from L1 carrying ownership of the data";


    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }
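
  // DirEntry backs a small per-bank directory over the local L1s: Sharers
  // records which local L1s have asked for the block, and exclusive marks
  // that a single local L1 probably holds it exclusively. As we read it,
  // this is only a hint used to filter request forwarding, not an exact
  // record of L1 contents.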

  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address, Entry);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
    void setMRU(Address);
  }

  external_type(PerfectCacheMemory) {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
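
  // The persistent table tracks every outstanding persistent (starvation)
  // request per address. As used below, findSmallest() implements the
  // protocol's fixed arbitration policy: among all starving requestors, the
  // one with the smallest ID wins, and everyone forwards tokens and data
  // toward it until it deactivates its request.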

  CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';

  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";

  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr];
    }
    assert(false);
    return L2cacheMemory[addr];
  }

  int getTokens(Address addr) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr].Tokens;
    } else {
      return 0;
    }
  }

  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    }
  }

  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    if (isCacheTagPresent(addr)) {
      return getL2CacheEntry(addr).CacheState;
    } else if (persistentTable.isLocked(addr) == true) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  string getStateStr(Address addr) {
    return L2Cache_State_to_string(getState(addr));
  }

  void setState(Address addr, State state) {

    if (isCacheTagPresent(addr)) {
      // Make sure the token count is in range
      assert(getL2CacheEntry(addr).Tokens >= 0);
      assert(getL2CacheEntry(addr).Tokens <= max_tokens());

      // Make sure we have no tokens in the locked state
      if (state == State:I_L) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M) {
        assert(getL2CacheEntry(addr).Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S) {
        assert(getL2CacheEntry(addr).Tokens > 0);
      }

      // You have at least half the tokens in O-like states
      if (state == State:O) {
        assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
        // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
      }

      getL2CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:I) {
        changePermission(addr, AccessPermission:Invalid);
      } else if (state == State:S || state == State:O) {
        changePermission(addr, AccessPermission:Read_Only);
      } else if (state == State:M) {
        changePermission(addr, AccessPermission:Read_Write);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }
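
  // These assertions are local pieces of the global token-conservation
  // invariant: summed over all caches, the memory controller, and messages
  // in flight, the token count for a block always equals max_tokens(), so
  // holding every token (state M) proves no other copy can be read.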

  void removeSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].Sharers.count() > 0) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].exclusive == true) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr) == true) {
      localDirectory[addr].exclusive := false;
    }
  }
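
  // The filter built on these helpers is deliberately best-effort: in
  // j_forwardTransientRequestToLocalSharers below, forwarding is suppressed
  // only when filtering_enabled is set, no local sharers are recorded, and
  // the request is a first attempt (RetryNum == 0). Retried requests are
  // always forwarded, so a stale filter can cost a retry but never
  // correctness.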

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);


  // ** IN_PORTS **

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {
          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
            trigger(Event:Persistent_GETS, in_msg.Address);
          } else {
            trigger(Event:Persistent_GETX, in_msg.Address);
          }
        }
        else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
        }
      }
    }
  }
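
  // After updating the table we re-derive the lock state rather than acting
  // on the message type directly: a deactivation may simply hand the lock to
  // the next starving processor, in which case the address stays locked and
  // we trigger another Persistent_GETS/GETX instead of Own_Lock_or_Unlock.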


  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Address);
          }
          else {
            trigger(Event:L1_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }
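
  // GETS requests are split on whether this bank holds exactly one token,
  // since giving that token away changes the bank's own rights: a local
  // L1_GETS_Last_Token drops us to I (or transfers ownership outright),
  // while an owned bank ignores an external last-token GetS and waits for
  // the requestor to escalate to a persistent request.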


  // Response Network
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Address);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Address);
              }
            }
            else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert((getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I));
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              }
            }
            else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            DEBUG_EXPR(in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }
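
  // The outer test (held tokens + incoming tokens == max_tokens()) is what
  // folds several message types into the *_All_Tokens events: whenever a
  // response would complete the full token set, the transition can move the
  // block straight to M regardless of whether the payload arrived as data,
  // an ack, or a writeback. A writeback that cannot be cached instead
  // triggers L2_Replacement on the victim chosen by cacheProbe(), and the
  // message is re-examined once the victim is gone.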


  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally

      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
      enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
        out_msg.Address := in_msg.Address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := in_msg.RequestorMachine;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action


  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L2Cache;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;

      if (getL2CacheEntry(address).Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (getL2CacheEntry(address).Tokens > N_tokens) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L2Cache;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getL2CacheEntry(address).Tokens >= 1);
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
      getL2CacheEntry(address).Tokens := 1;
    }
  }


  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect shared writeback to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect owned writeback to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }


  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
    }
  }

  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
        //profile_filter_action(1);
        DEBUG_EXPR("filtered message");
        DEBUG_EXPR(in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency) {
          out_msg.Address := in_msg.Address;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := in_msg.RequestorMachine;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }


  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data, ownership, and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      // assert(getL2CacheEntry(address).Tokens == max_tokens());
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }


  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens != 0);
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ((in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := true;
      }
    }
  }

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line") {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (isCacheTagPresent(address))) {
        L2cacheMemory.setMRU(address);
      }
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
      if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address, new Entry);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  //action(uu_profileMiss, "\u", desc="Profile the demand miss") {
  //  peek(L1requestNetwork_in, RequestMsg) {
  //    // AccessModeType not implemented
  //    //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
  //  }
  //}


  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
    }
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************
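
  // Each transition below reads transition(current_state, event[, next_state])
  // { actions }; when the next state is omitted, the block stays in its
  // current state and only the listed actions run.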

  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(S, Persistent_GETS, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }


  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }
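
  // Ignoring a transient GetS here is safe but not free: the requestor will
  // retry and eventually fall back to a persistent request, at which point
  // Persistent_GETS hands over the data (and all but one token) via
  // ff_sendDataWithAllButOneTokens.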

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }


  // Transitions from locked states

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    //uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, Persistent_GETS) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}
1456 }