/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L2Cache, "Token protocol")
 : CacheMemory * L2cacheMemory,
   int N_tokens,
   int l2_request_latency = 5,
   int l2_response_latency = 5,
   bool filtering_enabled = true
{

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network

  // this L2 bank -> a local L1 || mod-directory
  MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false";
  // this L2 bank -> mod-directory
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false";
  // this L2 bank -> a local L1
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";

  // FROM the network to this local bank of L2 cache

  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false";
  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
  // mod-directory -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false";

  // STATES
  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, desc="Not Present";
    I, desc="Idle";
    S, desc="Shared, not present in any local L1s";
    O, desc="Owned, not present in any L1s";
    M, desc="Modified, not present in any L1s";

    // Locked states
    I_L, "I^L", desc="Invalid, Locked";
    S_L, "S^L", desc="Shared, Locked";
  }
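
  // Token-count invariants (enforced by the assertions in setState() below):
  // M holds all max_tokens() tokens, S and O hold at least one token, and
  // NP and the locked I_L state hold none.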

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request for our last token";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor for our last token";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 that includes all the tokens";
    Writeback_Owned, desc="Received a writeback from L1 of owned data";

    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }
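
  // localDirectory is a coarse filter over the local L1s: Sharers records
  // which L1s may hold the block, and 'exclusive' marks a likely local
  // writer. It is what lets j_forwardTransientRequestToLocalSharers skip
  // forwarding external requests when no local sharer exists.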

  external_type(PerfectCacheMemory) {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
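
  // The persistent table is token coherence's starvation-avoidance
  // mechanism: an address stays locked while any persistent request for it
  // is outstanding, and findSmallest() returns the starving node with the
  // smallest machine ID, so every bank deterministically defers to the
  // same winner.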

  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";

  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L2cacheMemory[addr]);
    }
    assert(false);
    return static_cast(Entry, L2cacheMemory[addr]);
  }

  int getTokens(Address addr) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return getL2CacheEntry(addr).Tokens;
    } else {
      return 0;
    }
  }

  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      L2cacheMemory.changePermission(addr, permission);
    }
  }

  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    if (isCacheTagPresent(addr)) {
      return getL2CacheEntry(addr).CacheState;
    } else if (persistentTable.isLocked(addr) == true) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  std::string getStateStr(Address addr) {
    return L2Cache_State_to_string(getState(addr));
  }

  void setState(Address addr, State state) {
    if (isCacheTagPresent(addr)) {
      // Make sure the token count is in range
      assert(getL2CacheEntry(addr).Tokens >= 0);
      assert(getL2CacheEntry(addr).Tokens <= max_tokens());

      // Make sure we have no tokens in the locked state
      if (state == State:I_L) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M) {
        assert(getL2CacheEntry(addr).Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S) {
        assert(getL2CacheEntry(addr).Tokens > 0);
      }

      // You have at least half the tokens in O-like states
      if (state == State:O) {
        assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
        // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
      }

      getL2CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:I) {
        changePermission(addr, AccessPermission:Invalid);
      } else if (state == State:S || state == State:O) {
        changePermission(addr, AccessPermission:Read_Only);
      } else if (state == State:M) {
        changePermission(addr, AccessPermission:Read_Write);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }

  void removeSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].Sharers.count() > 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].exclusive == true) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr) == true) {
      localDirectory[addr].exclusive := false;
    }
  }

  GenericRequestType convertToGenericType(CoherenceRequestType type) {
    if (type == CoherenceRequestType:GETS) {
      return GenericRequestType:GETS;
    } else if (type == CoherenceRequestType:GETX) {
      return GenericRequestType:GETX;
    } else {
      DEBUG_EXPR(type);
      error("invalid CoherenceRequestType");
    }
  }

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);

  // ** IN_PORTS **

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {
          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
            trigger(Event:Persistent_GETS, in_msg.Address);
          } else {
            trigger(Event:Persistent_GETX, in_msg.Address);
          }
        } else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
        }
      }
    }
  }

  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
          } else {
            trigger(Event:Transient_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Address);
          } else {
            trigger(Event:L1_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
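  // Responses are dispatched on whether the incoming message would bring
  // this bank up to max_tokens(): if not, the partial events fire (Ack,
  // Data_Shared, Data_Owner, Writeback_*); if so, the *_All_Tokens variants
  // fire. Writebacks that find neither a free way nor a matching tag first
  // force an L2_Replacement of the victim picked by cacheProbe().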
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
              // either room is available or the block is already present
              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Address);
              }
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
              // either room is available or the block is already present
              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert((getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I));
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              }
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            DEBUG_EXPR(in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally

      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
      enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
        out_msg.Address := in_msg.Address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action

  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;

      if (getL2CacheEntry(address).Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

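  // Heuristic: when this bank holds a surplus (more than N_tokens), hand a
  // requester N_tokens at once so readers gain read permission quickly;
  // otherwise part with a single token.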
  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (getL2CacheEntry(address).Tokens > N_tokens) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
      } else {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getL2CacheEntry(address).Tokens >= 1);
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getL2CacheEntry(address).Tokens := 0;
  }

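  // For a persistent GETS we keep exactly one token, so this bank can still
  // satisfy local reads, and ship the rest to the starver; compare the
  // Persistent_GETX paths above, which give up everything.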
  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
      getL2CacheEntry(address).Tokens := 1;
    }
  }

  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect shared writeback to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect owned writeback to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
    }
  }

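  // With filtering enabled, a first-try external request is dropped here
  // when the local directory records no sharers: no local L1 can have the
  // block, so forwarding would only waste bandwidth. Retries (RetryNum > 0)
  // are always forwarded.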
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
        //profile_filter_action(1);
        DEBUG_EXPR("filtered message");
        DEBUG_EXPR(in_msg.RetryNum);
      } else {
        enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency) {
          out_msg.Address := in_msg.Address;
          out_msg.Requestor := in_msg.Requestor;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }

  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      // assert(getL2CacheEntry(address).Tokens == max_tokens());
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }

  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens != 0);
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ((in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := true;
      }
    }
  }

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line") {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (isCacheTagPresent(address))) {
        L2cacheMemory.setMRU(address);
      }
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
      if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address, new Entry);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1requestNetwork_in, RequestMsg) {
      L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
                                          in_msg.AccessMode,
                                          in_msg.Prefetch);
    }
  }

  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
    }
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************
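  // Each transition reads: transition(current state(s), triggering event[,
  // next state]) { ordered list of actions }. Omitting the next state means
  // the block stays in its current state.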

  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }

  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S, Persistent_GETS, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // wait for the request to go persistent before giving up our last token
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from locked states
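  // While another node holds a persistent lock (I_L/S_L), this bank cannot
  // keep tokens or data that arrive for the block; every response is
  // bounced on to the current starver via the gg_* actions.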

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, Persistent_GETS) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}