src/mem/protocol/MOESI_CMP_token-L2cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 *
33 */
34
35 machine(L2Cache, "Token protocol")
36 : CacheMemory * L2cacheMemory,
37 int N_tokens,
38 int l2_request_latency = 5,
39 int l2_response_latency = 5,
40 bool filtering_enabled = true
41 {
42
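// Configuration parameters:
//   N_tokens            - number of tokens handed to an external GETS requestor when
//                         this bank holds a surplus (see d_sendDataWithTokens)
//   l2_request_latency  - enqueue latency for requests issued by this bank
//   l2_response_latency - enqueue latency for responses issued by this bank
//   filtering_enabled   - if true, first-try transient requests are only forwarded to
//                         local L1s when the local directory records a sharer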
43 // L2 BANK QUEUES
44 // From local bank of L2 cache TO the network
45
46 // this L2 bank -> a local L1 || mod-directory
47 MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false";
48 // this L2 bank -> mod-directory
49 MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false";
50 // this L2 bank -> a local L1
51 MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";
52
53
54 // FROM the network to this local bank of L2 cache
55
56 // a local L1 || mod-directory -> this L2 bank
57 MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false";
58 MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
59 // mod-directory -> this L2 bank
60 MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false";
61 // a local L1 -> this L2 bank
62 MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false";
63
64 // STATES
65 enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
66 // Base states
67 NP, desc="Not Present";
68 I, desc="Idle";
69 S, desc="Shared, not present in any local L1s";
70 O, desc="Owned, not present in any L1s";
71 M, desc="Modified, not present in any L1s";
72
73 // Locked states
74 I_L, "I^L", desc="Invalid, Locked";
75 S_L, "S^L", desc="Shared, Locked";
76 }
77
78 // EVENTS
79 enumeration(Event, desc="Cache events") {
80
81 // Requests
82 L1_GETS, desc="local L1 GETS request";
83 L1_GETS_Last_Token, desc="local L1 GETS request for the last token";
84 L1_GETX, desc="local L1 GETX request";
85 L1_INV, desc="L1 no longer has tokens";
86 Transient_GETX, desc="A GetX from another processor";
87 Transient_GETS, desc="A GetS from another processor";
88 Transient_GETS_Last_Token, desc="A GetS from another processor for the last token";
89
90 // events initiated by this L2
91 L2_Replacement, desc="L2 Replacement", format="!r";
92
93 // events of external L2 responses
94
95 // Responses
96 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
97 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
98 Writeback_All_Tokens, desc="Received a writeback from L1 with all tokens";
99 Writeback_Owned, desc="Received an owned writeback from L1";
100
101
102 Data_Shared, desc="Received a data message, we are now a sharer";
103 Data_Owner, desc="Received a data message, we are now the owner";
104 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
105 Ack, desc="Received an ack message";
106 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
107
108 // Lock/Unlock
109 Persistent_GETX, desc="Another processor has priority to read/write";
110 Persistent_GETS, desc="Another processor has priority to read";
111 Persistent_GETS_Last_Token, desc="Another processor has priority to read, and we hold the last token";
112 Own_Lock_or_Unlock, desc="This processor now has priority";
113 }
114
115 // TYPES
116
117 // CacheEntry
118 structure(Entry, desc="...", interface="AbstractCacheEntry") {
119 State CacheState, desc="cache state";
120 bool Dirty, desc="Is the data dirty (different than memory)?";
121 int Tokens, desc="The number of tokens we're holding for the line";
122 DataBlock DataBlk, desc="data for the block";
123 }
124
125 structure(DirEntry, desc="...") {
126 Set Sharers, desc="Set of the internal processors that want the block in shared state";
127 bool exclusive, default="false", desc="if local exclusive is likely";
128 }
129
130 external_type(PerfectCacheMemory) {
131 void allocate(Address);
132 void deallocate(Address);
133 DirEntry lookup(Address);
134 bool isTagPresent(Address);
135 }
136
137 external_type(PersistentTable) {
138 void persistentRequestLock(Address, MachineID, AccessType);
139 void persistentRequestUnlock(Address, MachineID);
140 MachineID findSmallest(Address);
141 AccessType typeOfSmallest(Address);
142 void markEntries(Address);
143 bool isLocked(Address);
144 int countStarvingForAddress(Address);
145 int countReadStarvingForAddress(Address);
146 }
147
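// persistentTable tracks outstanding persistent (starvation-avoidance) requests per
// address; localDirectory is a filter recording which local L1s may hold the block,
// used to decide whether external transient requests must be forwarded locally.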
148 PersistentTable persistentTable;
149 PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
150
151 Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
152 if (L2cacheMemory.isTagPresent(addr)) {
153 return static_cast(Entry, L2cacheMemory[addr]);
154 }
155 assert(false);
156 return static_cast(Entry, L2cacheMemory[addr]);
157 }
158
159 int getTokens(Address addr) {
160 if (L2cacheMemory.isTagPresent(addr)) {
161 return getL2CacheEntry(addr).Tokens;
162 } else {
163 return 0;
164 }
165 }
166
167 void changePermission(Address addr, AccessPermission permission) {
168 if (L2cacheMemory.isTagPresent(addr)) {
169 return L2cacheMemory.changePermission(addr, permission);
170 }
171 }
172
173 bool isCacheTagPresent(Address addr) {
174 return (L2cacheMemory.isTagPresent(addr) );
175 }
176
177 State getState(Address addr) {
178 if (isCacheTagPresent(addr)) {
179 return getL2CacheEntry(addr).CacheState;
180 } else if (persistentTable.isLocked(addr) == true) {
181 return State:I_L;
182 } else {
183 return State:NP;
184 }
185 }
186
187 std::string getStateStr(Address addr) {
188 return L2Cache_State_to_string(getState(addr));
189 }
190
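// setState enforces the token-count invariants of each state.  The owner token is
// counted as max_tokens()/2 ordinary tokens, so a count of exactly max_tokens()/2
// never occurs, "more than half the tokens" means the owner token is held (required
// in O), and M requires the full max_tokens() count.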
191 void setState(Address addr, State state) {
192
193
194 if (isCacheTagPresent(addr)) {
195 // Make sure the token count is in range
196 assert(getL2CacheEntry(addr).Tokens >= 0);
197 assert(getL2CacheEntry(addr).Tokens <= max_tokens());
198 assert(getL2CacheEntry(addr).Tokens != (max_tokens() / 2));
199
200 // Make sure we have no tokens in L
201 if ((state == State:I_L) ) {
202 if (isCacheTagPresent(addr)) {
203 assert(getL2CacheEntry(addr).Tokens == 0);
204 }
205 }
206
207 // in M you have all the tokens
208 if (state == State:M ) {
209 assert(getL2CacheEntry(addr).Tokens == max_tokens());
210 }
211
212 // in NP you have no tokens
213 if (state == State:NP) {
214 assert(getL2CacheEntry(addr).Tokens == 0);
215 }
216
217 // You have at least one token in S-like states
218 if (state == State:S ) {
219 assert(getL2CacheEntry(addr).Tokens > 0);
220 }
221
222 // You have more than half the tokens in O-like states
223 if (state == State:O ) {
224 assert(getL2CacheEntry(addr).Tokens > (max_tokens() / 2));
225 }
226
227 getL2CacheEntry(addr).CacheState := state;
228
229 // Set permission
230 if (state == State:I) {
231 changePermission(addr, AccessPermission:Invalid);
232 } else if (state == State:S || state == State:O ) {
233 changePermission(addr, AccessPermission:Read_Only);
234 } else if (state == State:M ) {
235 changePermission(addr, AccessPermission:Read_Write);
236 } else {
237 changePermission(addr, AccessPermission:Invalid);
238 }
239 }
240 }
241
242 void removeSharer(Address addr, NodeID id) {
243
244 if (localDirectory.isTagPresent(addr)) {
245 localDirectory[addr].Sharers.remove(id);
246 if (localDirectory[addr].Sharers.count() == 0) {
247 localDirectory.deallocate(addr);
248 }
249 }
250 }
251
252 bool sharersExist(Address addr) {
253 if (localDirectory.isTagPresent(addr)) {
254 if (localDirectory[addr].Sharers.count() > 0) {
255 return true;
256 }
257 else {
258 return false;
259 }
260 }
261 else {
262 return false;
263 }
264 }
265
266 bool exclusiveExists(Address addr) {
267 if (localDirectory.isTagPresent(addr)) {
268 if (localDirectory[addr].exclusive == true) {
269 return true;
270 }
271 else {
272 return false;
273 }
274 }
275 else {
276 return false;
277 }
278 }
279
280 // assumes that caller will check to make sure tag is present
281 Set getSharers(Address addr) {
282 return localDirectory[addr].Sharers;
283 }
284
285 void setNewWriter(Address addr, NodeID id) {
286 if (localDirectory.isTagPresent(addr) == false) {
287 localDirectory.allocate(addr);
288 }
289 localDirectory[addr].Sharers.clear();
290 localDirectory[addr].Sharers.add(id);
291 localDirectory[addr].exclusive := true;
292 }
293
294 void addNewSharer(Address addr, NodeID id) {
295 if (localDirectory.isTagPresent(addr) == false) {
296 localDirectory.allocate(addr);
297 }
298 localDirectory[addr].Sharers.add(id);
299 // localDirectory[addr].exclusive := false;
300 }
301
302 void clearExclusiveBitIfExists(Address addr) {
303 if (localDirectory.isTagPresent(addr) == true) {
304 localDirectory[addr].exclusive := false;
305 }
306 }
307
308 GenericRequestType convertToGenericType(CoherenceRequestType type) {
309 if(type == CoherenceRequestType:GETS) {
310 return GenericRequestType:GETS;
311 } else if(type == CoherenceRequestType:GETX) {
312 return GenericRequestType:GETX;
313 } else {
314 DPRINTF(RubySlicc, "%s\n", type);
315 error("invalid CoherenceRequestType");
316 }
317 }
318
319 // ** OUT_PORTS **
320 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
321 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
322 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
323
324
325
326 // ** IN_PORTS **
327
328 // Persistent Network
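// Persistent (starvation-avoidance) requests first update the persistent table; the
// resulting table state then decides the reaction: if the address is locked by a
// starving node this bank surrenders its tokens/data, otherwise no request remains
// active for the address and Own_Lock_or_Unlock is triggered.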
329 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
330 if (persistentNetwork_in.isReady()) {
331 peek(persistentNetwork_in, PersistentMsg) {
332 assert(in_msg.Destination.isElement(machineID));
333
334 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
335 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
336 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
337 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
338 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
339 persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
340 } else {
341 error("Unexpected message");
342 }
343
344 // React to the message based on the current state of the table
345 if (persistentTable.isLocked(in_msg.Address)) {
346
347 if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
348 if (getTokens(in_msg.Address) == 1 ||
349 getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
350 trigger(Event:Persistent_GETS_Last_Token, in_msg.Address);
351 } else {
352 trigger(Event:Persistent_GETS, in_msg.Address);
353 }
354 } else {
355 trigger(Event:Persistent_GETX, in_msg.Address);
356 }
357 }
358 else {
359 trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
360 }
361 }
362 }
363 }
364
365
366 // Request Network
367 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
368 if (requestNetwork_in.isReady()) {
369 peek(requestNetwork_in, RequestMsg) {
370 assert(in_msg.Destination.isElement(machineID));
371
372 if (in_msg.Type == CoherenceRequestType:GETX) {
373 trigger(Event:Transient_GETX, in_msg.Address);
374 } else if (in_msg.Type == CoherenceRequestType:GETS) {
375 if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
376 trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
377 }
378 else {
379 trigger(Event:Transient_GETS, in_msg.Address);
380 }
381 } else {
382 error("Unexpected message");
383 }
384 }
385 }
386 }
387
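// Requests from local L1s.  A GETS is distinguished as L1_GETS_Last_Token when this
// bank holds only a single token, or only the owner token plus one, so the transitions
// can treat giving up the final token specially.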
388 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
389 if (L1requestNetwork_in.isReady()) {
390 peek(L1requestNetwork_in, RequestMsg) {
391 assert(in_msg.Destination.isElement(machineID));
392 if (in_msg.Type == CoherenceRequestType:GETX) {
393 trigger(Event:L1_GETX, in_msg.Address);
394 } else if (in_msg.Type == CoherenceRequestType:GETS) {
395 if (getTokens(in_msg.Address) == 1 ||
396 getTokens(in_msg.Address) == (max_tokens() / 2) + 1) {
397 trigger(Event:L1_GETS_Last_Token, in_msg.Address);
398 }
399 else {
400 trigger(Event:L1_GETS, in_msg.Address);
401 }
402 } else {
403 error("Unexpected message");
404 }
405 }
406 }
407 }
408
409
410 // Response Network
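// Responses and writebacks from L1s and the directory.  If the tokens already held
// plus the incoming tokens add up to max_tokens(), the message completes the token
// set and the *_All_Tokens variant of the event is triggered; writebacks that need a
// cache block may instead trigger L2_Replacement of a victim line.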
411 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
412 if (responseNetwork_in.isReady()) {
413 peek(responseNetwork_in, ResponseMsg) {
414 assert(in_msg.Destination.isElement(machineID));
415 if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
416 if (in_msg.Type == CoherenceResponseType:ACK) {
417 assert(in_msg.Tokens < (max_tokens() / 2));
418 trigger(Event:Ack, in_msg.Address);
419 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
420 trigger(Event:Data_Owner, in_msg.Address);
421 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
422 trigger(Event:Data_Shared, in_msg.Address);
423 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
424
425 if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
426
427 // either room is available or the block is already present
428
429 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
430 assert(in_msg.Dirty == false);
431 trigger(Event:Writeback_Tokens, in_msg.Address);
432 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
433 assert(in_msg.Dirty == false);
434 trigger(Event:Writeback_Shared_Data, in_msg.Address);
435 }
436 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
437 //assert(in_msg.Dirty == false);
438 trigger(Event:Writeback_Owned, in_msg.Address);
439 }
440 }
441 else {
442 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
443 }
444 } else if (in_msg.Type == CoherenceResponseType:INV) {
445 trigger(Event:L1_INV, in_msg.Address);
446 } else {
447 error("Unexpected message");
448 }
449 } else {
450 if (in_msg.Type == CoherenceResponseType:ACK) {
451 assert(in_msg.Tokens < (max_tokens() / 2));
452 trigger(Event:Ack_All_Tokens, in_msg.Address);
453 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
454 trigger(Event:Data_All_Tokens, in_msg.Address);
455 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
456 if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {
457
458 // either room is available or the block is already present
459
460 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
461 assert(in_msg.Dirty == false);
462 assert( (getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I) );
463 trigger(Event:Writeback_All_Tokens, in_msg.Address);
464 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
465 assert(in_msg.Dirty == false);
466 trigger(Event:Writeback_All_Tokens, in_msg.Address);
467 }
468 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
469 trigger(Event:Writeback_All_Tokens, in_msg.Address);
470 }
471 }
472 else {
473 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
474 }
475 } else if (in_msg.Type == CoherenceResponseType:INV) {
476 trigger(Event:L1_INV, in_msg.Address);
477 } else {
478 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
479 error("Unexpected message");
480 }
481 }
482 }
483 }
484 }
485
486
487 // ACTIONS
488
489 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
490
491 peek(L1requestNetwork_in, RequestMsg) {
492
493 // if this is a retry or no local sharers, broadcast normally
494
495 // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
496 enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
497 out_msg.Address := in_msg.Address;
498 out_msg.Type := in_msg.Type;
499 out_msg.Requestor := in_msg.Requestor;
500 out_msg.RetryNum := in_msg.RetryNum;
501
502 //
503 // If a statically shared L2 cache, then no other L2 caches can
504 // store the block
505 //
506 //out_msg.Destination.broadcast(MachineType:L2Cache);
507 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
508 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
509
510 out_msg.Destination.add(map_Address_to_Directory(address));
511 out_msg.MessageSize := MessageSizeType:Request_Control;
512 out_msg.AccessMode := in_msg.AccessMode;
513 out_msg.Prefetch := in_msg.Prefetch;
514 } //enqueue
515 // } // if
516
517 //profile_filter_action(0);
518 } // peek
519 } //action
520
521
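// Responses that arrive for a block this bank is not caching (e.g. in NP) are bounced
// on to the directory rather than dropped, so no tokens are ever lost.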
522 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
523 peek(responseNetwork_in, ResponseMsg) {
524 // FIXME, should use a 3rd vnet
525 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
526 out_msg.Address := address;
527 out_msg.Type := in_msg.Type;
528 out_msg.Sender := machineID;
529 out_msg.Destination.add(map_Address_to_Directory(address));
530 out_msg.Tokens := in_msg.Tokens;
531 out_msg.MessageSize := in_msg.MessageSize;
532 out_msg.DataBlk := in_msg.DataBlk;
533 out_msg.Dirty := in_msg.Dirty;
534 }
535 }
536 }
537
538 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
539 if (getL2CacheEntry(address).Tokens > 0) {
540 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
541 out_msg.Address := address;
542 out_msg.Type := CoherenceResponseType:ACK;
543 out_msg.Sender := machineID;
544 out_msg.Destination.add(map_Address_to_Directory(address));
545 out_msg.Tokens := getL2CacheEntry(address).Tokens;
546 out_msg.MessageSize := MessageSizeType:Writeback_Control;
547 }
548 getL2CacheEntry(address).Tokens := 0;
549 }
550 }
551
552 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
553 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
554 out_msg.Address := address;
555 out_msg.Sender := machineID;
556 out_msg.Destination.add(map_Address_to_Directory(address));
557 out_msg.Tokens := getL2CacheEntry(address).Tokens;
558 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
559 out_msg.Dirty := getL2CacheEntry(address).Dirty;
560
561 if (getL2CacheEntry(address).Dirty) {
562 out_msg.MessageSize := MessageSizeType:Writeback_Data;
563 out_msg.Type := CoherenceResponseType:DATA_OWNER;
564 } else {
565 out_msg.MessageSize := MessageSizeType:Writeback_Control;
566 out_msg.Type := CoherenceResponseType:ACK_OWNER;
567 }
568 }
569 getL2CacheEntry(address).Tokens := 0;
570 }
571
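// External GETS service: with the owner token plus more than N_tokens spare tokens on
// hand, N_tokens of them are sent with the data; otherwise a single token is sent.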
572 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
573 peek(requestNetwork_in, RequestMsg) {
574 if (getL2CacheEntry(address).Tokens > (N_tokens + (max_tokens() / 2))) {
575 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
576 out_msg.Address := address;
577 out_msg.Type := CoherenceResponseType:DATA_SHARED;
578 out_msg.Sender := machineID;
579 out_msg.Destination.add(in_msg.Requestor);
580 out_msg.Tokens := N_tokens;
581 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
582 out_msg.Dirty := false;
583 out_msg.MessageSize := MessageSizeType:Response_Data;
584 }
585 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens;
586 }
587 else {
588 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
589 out_msg.Address := address;
590 out_msg.Type := CoherenceResponseType:DATA_SHARED;
591 out_msg.Sender := machineID;
592 out_msg.Destination.add(in_msg.Requestor);
593 out_msg.Tokens := 1;
594 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
595 out_msg.Dirty := false;
596 out_msg.MessageSize := MessageSizeType:Response_Data;
597 }
598 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
599 }
600 }
601 }
602
603 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
604 peek(requestNetwork_in, RequestMsg) {
605 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
606 out_msg.Address := address;
607 out_msg.Type := CoherenceResponseType:DATA_OWNER;
608 out_msg.Sender := machineID;
609 out_msg.Destination.add(in_msg.Requestor);
610 assert(getL2CacheEntry(address).Tokens >= 1);
611 out_msg.Tokens := getL2CacheEntry(address).Tokens;
612 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
613 out_msg.Dirty := getL2CacheEntry(address).Dirty;
614 out_msg.MessageSize := MessageSizeType:Response_Data;
615 }
616 }
617 getL2CacheEntry(address).Tokens := 0;
618 }
619
620 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
621 if (getL2CacheEntry(address).Tokens > 0) {
622 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
623 out_msg.Address := address;
624 out_msg.Type := CoherenceResponseType:ACK;
625 out_msg.Sender := machineID;
626 out_msg.Destination.add(persistentTable.findSmallest(address));
627 assert(getL2CacheEntry(address).Tokens >= 1);
628 out_msg.Tokens := getL2CacheEntry(address).Tokens;
629 out_msg.MessageSize := MessageSizeType:Response_Control;
630 }
631 }
632 getL2CacheEntry(address).Tokens := 0;
633 }
634
635 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
636 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
637 out_msg.Address := address;
638 out_msg.Type := CoherenceResponseType:DATA_OWNER;
639 out_msg.Sender := machineID;
640 out_msg.Destination.add(persistentTable.findSmallest(address));
641 assert(getL2CacheEntry(address).Tokens >= 1);
642 out_msg.Tokens := getL2CacheEntry(address).Tokens;
643 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
644 out_msg.Dirty := getL2CacheEntry(address).Dirty;
645 out_msg.MessageSize := MessageSizeType:Response_Data;
646 }
647 getL2CacheEntry(address).Tokens := 0;
648 }
649
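// Servicing persistent requests: for a persistent GETS this bank keeps one token (and
// the data) and sends the rest to the starving node; for a persistent GETX, or when
// only the last token remains, everything is handed over.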
650 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
651 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
652 assert(getL2CacheEntry(address).Tokens > 0);
653 if (getL2CacheEntry(address).Tokens > 1) {
654 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
655 out_msg.Address := address;
656 out_msg.Type := CoherenceResponseType:ACK;
657 out_msg.Sender := machineID;
658 out_msg.Destination.add(persistentTable.findSmallest(address));
659 assert(getL2CacheEntry(address).Tokens >= 1);
660 out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
661 out_msg.MessageSize := MessageSizeType:Response_Control;
662 }
663 }
664 getL2CacheEntry(address).Tokens := 1;
665 }
666
667 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
668 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
669 assert(getL2CacheEntry(address).Tokens > (max_tokens() / 2) + 1);
670 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
671 out_msg.Address := address;
672 out_msg.Type := CoherenceResponseType:DATA_OWNER;
673 out_msg.Sender := machineID;
674 out_msg.Destination.add(persistentTable.findSmallest(address));
675 out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
676 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
677 out_msg.Dirty := getL2CacheEntry(address).Dirty;
678 out_msg.MessageSize := MessageSizeType:Response_Data;
679 }
680 getL2CacheEntry(address).Tokens := 1;
681 }
682
683 action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
684 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
685 assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
686 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
687 out_msg.Address := address;
688 out_msg.Type := CoherenceResponseType:DATA_OWNER;
689 out_msg.Sender := machineID;
690 out_msg.Destination.add(persistentTable.findSmallest(address));
691 out_msg.Tokens := getL2CacheEntry(address).Tokens;
692 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
693 out_msg.Dirty := getL2CacheEntry(address).Dirty;
694 out_msg.MessageSize := MessageSizeType:Response_Data;
695 }
696 getL2CacheEntry(address).Tokens := 0;
697 }
698
699
700
701 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
702 // assert(persistentTable.isLocked(address));
703 peek(responseNetwork_in, ResponseMsg) {
704 // FIXME, should use a 3rd vnet in some cases
705 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
706 out_msg.Address := address;
707 out_msg.Type := in_msg.Type;
708 out_msg.Sender := machineID;
709 out_msg.Destination.add(persistentTable.findSmallest(address));
710 out_msg.Tokens := in_msg.Tokens;
711 out_msg.DataBlk := in_msg.DataBlk;
712 out_msg.Dirty := in_msg.Dirty;
713 out_msg.MessageSize := in_msg.MessageSize;
714 }
715 }
716 }
717
718 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
719 //assert(persistentTable.isLocked(address));
720 peek(responseNetwork_in, ResponseMsg) {
721 // FIXME, should use a 3rd vnet in some cases
722 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
723 out_msg.Address := address;
724 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
725 out_msg.Type := CoherenceResponseType:DATA_SHARED;
726 } else {
727 assert(in_msg.Tokens < (max_tokens() / 2));
728 out_msg.Type := CoherenceResponseType:ACK;
729 }
730 out_msg.Sender := machineID;
731 out_msg.Destination.add(persistentTable.findSmallest(address));
732 out_msg.Tokens := in_msg.Tokens;
733 out_msg.DataBlk := in_msg.DataBlk;
734 out_msg.Dirty := in_msg.Dirty;
735 out_msg.MessageSize := in_msg.MessageSize;
736 }
737 }
738 }
739
740 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
741 // assert(persistentTable.isLocked(address));
742 peek(responseNetwork_in, ResponseMsg) {
743 // FIXME, should use a 3rd vnet in some cases
744 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
745 out_msg.Address := address;
746 out_msg.Type := CoherenceResponseType:DATA_OWNER;
747 out_msg.Sender := machineID;
748 out_msg.Destination.add(persistentTable.findSmallest(address));
749 out_msg.Tokens := in_msg.Tokens;
750 out_msg.DataBlk := in_msg.DataBlk;
751 out_msg.Dirty := in_msg.Dirty;
752 out_msg.MessageSize := in_msg.MessageSize;
753 }
754 }
755 }
756
757
758 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
759 peek(responseNetwork_in, ResponseMsg) {
760 removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
761 }
762 }
763
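// With filtering enabled, a first-try transient request (RetryNum == 0) is dropped
// here when the local directory shows no local sharers; retried requests, and requests
// for blocks with local sharers, are broadcast to all local L1s except the requestor.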
764 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
765 peek(requestNetwork_in, RequestMsg) {
766 if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
767 //profile_filter_action(1);
768 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
769 in_msg.RetryNum);
770 }
771 else {
772 enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
773 out_msg.Address := in_msg.Address;
774 out_msg.Requestor := in_msg.Requestor;
775
776 //
777 // Currently assuming only one chip so all L1s are local
778 //
779 //out_msg.Destination := getLocalL1IDs(machineID);
780 out_msg.Destination.broadcast(MachineType:L1Cache);
781 out_msg.Destination.remove(in_msg.Requestor);
782
783 out_msg.Type := in_msg.Type;
784 out_msg.isLocal := false;
785 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
786 out_msg.AccessMode := in_msg.AccessMode;
787 out_msg.Prefetch := in_msg.Prefetch;
788 }
789 //profile_filter_action(0);
790 }
791 }
792 }
793
794
795 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
796 peek(L1requestNetwork_in, RequestMsg) {
797 assert(getL2CacheEntry(address).Tokens > 0);
798 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
799 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
800 out_msg.Address := address;
801 out_msg.Type := CoherenceResponseType:DATA_SHARED;
802 out_msg.Sender := machineID;
803 out_msg.Destination.add(in_msg.Requestor);
804 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
805 out_msg.Dirty := false;
806 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
807 out_msg.Tokens := 1;
808 }
809 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
810 }
811 }
812
813 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
814 peek(L1requestNetwork_in, RequestMsg) {
815 assert(getL2CacheEntry(address).Tokens == (max_tokens() / 2) + 1);
816 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
817 out_msg.Address := address;
818 out_msg.Type := CoherenceResponseType:DATA_OWNER;
819 out_msg.Sender := machineID;
820 out_msg.Destination.add(in_msg.Requestor);
821 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
822 out_msg.Dirty := getL2CacheEntry(address).Dirty;
823 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
824 out_msg.Tokens := getL2CacheEntry(address).Tokens;
825 }
826 getL2CacheEntry(address).Tokens := 0;
827 }
828 }
829
830 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
831 peek(L1requestNetwork_in, RequestMsg) {
832 // assert(getL2CacheEntry(address).Tokens == max_tokens());
833 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
834 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
835 out_msg.Address := address;
836 out_msg.Type := CoherenceResponseType:DATA_OWNER;
837 out_msg.Sender := machineID;
838 out_msg.Destination.add(in_msg.Requestor);
839 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
840 out_msg.Dirty := getL2CacheEntry(address).Dirty;
841 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
842 //out_msg.Tokens := max_tokens();
843 out_msg.Tokens := getL2CacheEntry(address).Tokens;
844 }
845 getL2CacheEntry(address).Tokens := 0;
846 }
847 }
848
849 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
850 persistentNetwork_in.dequeue();
851 }
852
853 action(m_popRequestQueue, "m", desc="Pop request queue.") {
854 requestNetwork_in.dequeue();
855 }
856
857 action(n_popResponseQueue, "n", desc="Pop response queue") {
858 responseNetwork_in.dequeue();
859 }
860
861 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
862 L1requestNetwork_in.dequeue();
863 }
864
865
866 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
867 peek(responseNetwork_in, ResponseMsg) {
868 assert(in_msg.Tokens != 0);
869 getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;
870
871 // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
872 // may not trigger this action.
873 if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
874 getL2CacheEntry(address).Dirty := true;
875 }
876 }
877 }
878
879 action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
880 peek(L1requestNetwork_in, RequestMsg) {
881 if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
882 if (in_msg.Type == CoherenceRequestType:GETX) {
883 setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
884 } else if (in_msg.Type == CoherenceRequestType:GETS) {
885 addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
886 }
887 }
888 }
889 }
890
891 action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
892 clearExclusiveBitIfExists(address);
893 }
894
895 action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
896 peek(L1requestNetwork_in, RequestMsg) {
897 if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
898 (isCacheTagPresent(address))) {
899 L2cacheMemory.setMRU(address);
900 }
901 }
902 }
903
904 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
905 if (getL2CacheEntry(address).Tokens > 0) {
906 peek(requestNetwork_in, RequestMsg) {
907 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
908 out_msg.Address := address;
909 out_msg.Type := CoherenceResponseType:ACK;
910 out_msg.Sender := machineID;
911 out_msg.Destination.add(in_msg.Requestor);
912 assert(getL2CacheEntry(address).Tokens >= 1);
913 out_msg.Tokens := getL2CacheEntry(address).Tokens;
914 out_msg.MessageSize := MessageSizeType:Response_Control;
915 }
916 }
917 }
918 getL2CacheEntry(address).Tokens := 0;
919 }
920
921 action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
922 if (getL2CacheEntry(address).Tokens > 0) {
923 peek(L1requestNetwork_in, RequestMsg) {
924 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
925 out_msg.Address := address;
926 out_msg.Type := CoherenceResponseType:ACK;
927 out_msg.Sender := machineID;
928 out_msg.Destination.add(in_msg.Requestor);
929 assert(getL2CacheEntry(address).Tokens >= 1);
930 out_msg.Tokens := getL2CacheEntry(address).Tokens;
931 out_msg.MessageSize := MessageSizeType:Response_Control;
932 }
933 }
934 }
935 getL2CacheEntry(address).Tokens := 0;
936 }
937
938 action(u_writeDataToCache, "u", desc="Write data to cache") {
939 peek(responseNetwork_in, ResponseMsg) {
940 getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
941 if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
942 getL2CacheEntry(address).Dirty := in_msg.Dirty;
943 }
944 }
945 }
946
947 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
948 L2cacheMemory.allocate(address, new Entry);
949 }
950
951 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
952 L2cacheMemory.deallocate(address);
953 }
954
955 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
956 peek(L1requestNetwork_in, RequestMsg) {
957 L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
958 in_msg.AccessMode,
959 in_msg.Prefetch);
960 }
961 }
962
963
964 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
965 peek(responseNetwork_in, ResponseMsg) {
966 if (in_msg.Type != CoherenceResponseType:ACK &&
967 in_msg.Type != CoherenceResponseType:WB_TOKENS) {
968 assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
969 }
970 }
971 }
972
973
974 //*****************************************************
975 // TRANSITIONS
976 //*****************************************************
977
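// While another node holds a persistent lock on an address (states I_L and S_L),
// incoming tokens, data and writebacks are redirected to the starving node instead of
// being kept; local L1 requests continue to be serviced.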
978 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
979
980 h_updateFilterFromL1HintOrWB;
981 n_popResponseQueue;
982 }
983
984 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
985 l_popPersistentQueue;
986 }
987
988
989 // Transitions from NP
990
991 transition(NP, {Transient_GETX, Transient_GETS}) {
992 // forward message to local sharers
993 r_clearExclusive;
994 j_forwardTransientRequestToLocalSharers;
995 m_popRequestQueue;
996 }
997
998
999 transition(NP, {L1_GETS, L1_GETX}) {
1000 a_broadcastLocalRequest;
1001 r_markNewSharer;
1002 uu_profileMiss;
1003 o_popL1RequestQueue;
1004 }
1005
1006 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
1007 bb_bounceResponse;
1008 n_popResponseQueue;
1009 }
1010
1011 transition(NP, Writeback_Shared_Data, S) {
1012 vv_allocateL2CacheBlock;
1013 u_writeDataToCache;
1014 q_updateTokensFromResponse;
1015 h_updateFilterFromL1HintOrWB;
1016 n_popResponseQueue;
1017 }
1018
1019 transition(NP, Writeback_Tokens, I) {
1020 vv_allocateL2CacheBlock;
1021 q_updateTokensFromResponse;
1022 h_updateFilterFromL1HintOrWB;
1023 n_popResponseQueue;
1024 }
1025
1026 transition(NP, Writeback_All_Tokens, M) {
1027 vv_allocateL2CacheBlock;
1028 u_writeDataToCache;
1029 q_updateTokensFromResponse;
1030 h_updateFilterFromL1HintOrWB;
1031 n_popResponseQueue;
1032 }
1033
1034 transition(NP, Writeback_Owned, O) {
1035 vv_allocateL2CacheBlock;
1036 u_writeDataToCache;
1037 q_updateTokensFromResponse;
1038 h_updateFilterFromL1HintOrWB;
1039 n_popResponseQueue;
1040 }
1041
1042
1043 transition(NP,
1044 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1045 I_L) {
1046 l_popPersistentQueue;
1047 }
1048
1049 // Transitions from Idle
1050
1051 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1052 a_broadcastLocalRequest;
1053 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1054 r_markNewSharer;
1055 uu_profileMiss;
1056 o_popL1RequestQueue;
1057 }
1058
1059 transition(I, L1_GETX) {
1060 a_broadcastLocalRequest;
1061 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1062 r_markNewSharer;
1063 uu_profileMiss;
1064 o_popL1RequestQueue;
1065 }
1066
1067 transition(I, L2_Replacement) {
1068 c_cleanReplacement; // Only needed in some cases
1069 rr_deallocateL2CacheBlock;
1070 }
1071
1072 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1073 r_clearExclusive;
1074 t_sendAckWithCollectedTokens;
1075 j_forwardTransientRequestToLocalSharers;
1076 m_popRequestQueue;
1077 }
1078
1079 transition(I,
1080 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1081 I_L) {
1082 e_sendAckWithCollectedTokens;
1083 l_popPersistentQueue;
1084 }
1085
1086
1087 transition(I, Ack) {
1088 q_updateTokensFromResponse;
1089 n_popResponseQueue;
1090 }
1091
1092 transition(I, Data_Shared, S) {
1093 u_writeDataToCache;
1094 q_updateTokensFromResponse;
1095 n_popResponseQueue;
1096 }
1097
1098 transition(I, Writeback_Shared_Data, S) {
1099 u_writeDataToCache;
1100 q_updateTokensFromResponse;
1101 h_updateFilterFromL1HintOrWB;
1102 n_popResponseQueue;
1103 }
1104
1105 transition(I, Writeback_Tokens) {
1106 q_updateTokensFromResponse;
1107 h_updateFilterFromL1HintOrWB;
1108 n_popResponseQueue;
1109 }
1110
1111 transition(I, Data_Owner, O) {
1112 u_writeDataToCache;
1113 q_updateTokensFromResponse;
1114 n_popResponseQueue;
1115 }
1116
1117 transition(I, Writeback_Owned, O) {
1118 u_writeDataToCache;
1119 q_updateTokensFromResponse;
1120 h_updateFilterFromL1HintOrWB;
1121 n_popResponseQueue;
1122 }
1123
1124 transition(I, Data_All_Tokens, M) {
1125 u_writeDataToCache;
1126 q_updateTokensFromResponse;
1127 n_popResponseQueue;
1128 }
1129
1130
1131 transition(I, Writeback_All_Tokens, M) {
1132 u_writeDataToCache;
1133 q_updateTokensFromResponse;
1134 h_updateFilterFromL1HintOrWB;
1135 n_popResponseQueue;
1136 }
1137
1138 // Transitions from Shared
1139
1140 transition(S, L2_Replacement, I) {
1141 c_cleanReplacement;
1142 rr_deallocateL2CacheBlock;
1143 }
1144
1145 transition(S, Transient_GETX, I) {
1146 r_clearExclusive;
1147 t_sendAckWithCollectedTokens;
1148 j_forwardTransientRequestToLocalSharers;
1149 m_popRequestQueue;
1150 }
1151
1152 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1153 j_forwardTransientRequestToLocalSharers;
1154 r_clearExclusive;
1155 m_popRequestQueue;
1156 }
1157
1158 transition(S, Persistent_GETX, I_L) {
1159 e_sendAckWithCollectedTokens;
1160 l_popPersistentQueue;
1161 }
1162
1163
1164 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1165 f_sendAckWithAllButOneTokens;
1166 l_popPersistentQueue;
1167 }
1168
1169
1170 transition(S, Ack) {
1171 q_updateTokensFromResponse;
1172 n_popResponseQueue;
1173 }
1174
1175 transition(S, Data_Shared) {
1176 w_assertIncomingDataAndCacheDataMatch;
1177 q_updateTokensFromResponse;
1178 n_popResponseQueue;
1179 }
1180
1181 transition(S, Writeback_Tokens) {
1182 q_updateTokensFromResponse;
1183 h_updateFilterFromL1HintOrWB;
1184 n_popResponseQueue;
1185 }
1186
1187 transition(S, Writeback_Shared_Data) {
1188 w_assertIncomingDataAndCacheDataMatch;
1189 q_updateTokensFromResponse;
1190 h_updateFilterFromL1HintOrWB;
1191 n_popResponseQueue;
1192 }
1193
1194
1195 transition(S, Data_Owner, O) {
1196 w_assertIncomingDataAndCacheDataMatch;
1197 q_updateTokensFromResponse;
1198 n_popResponseQueue;
1199 }
1200
1201 transition(S, Writeback_Owned, O) {
1202 w_assertIncomingDataAndCacheDataMatch;
1203 q_updateTokensFromResponse;
1204 h_updateFilterFromL1HintOrWB;
1205 n_popResponseQueue;
1206 }
1207
1208 transition(S, Data_All_Tokens, M) {
1209 w_assertIncomingDataAndCacheDataMatch;
1210 q_updateTokensFromResponse;
1211 n_popResponseQueue;
1212 }
1213
1214 transition(S, Writeback_All_Tokens, M) {
1215 w_assertIncomingDataAndCacheDataMatch;
1216 q_updateTokensFromResponse;
1217 h_updateFilterFromL1HintOrWB;
1218 n_popResponseQueue;
1219 }
1220
1221 transition(S, L1_GETX, I) {
1222 a_broadcastLocalRequest;
1223 tt_sendLocalAckWithCollectedTokens;
1224 r_markNewSharer;
1225 r_setMRU;
1226 uu_profileMiss;
1227 o_popL1RequestQueue;
1228 }
1229
1230
1231 transition(S, L1_GETS) {
1232 k_dataFromL2CacheToL1Requestor;
1233 r_markNewSharer;
1234 r_setMRU;
1235 o_popL1RequestQueue;
1236 }
1237
1238 transition(S, L1_GETS_Last_Token, I) {
1239
1240 k_dataFromL2CacheToL1Requestor;
1241 r_markNewSharer;
1242 r_setMRU;
1243 o_popL1RequestQueue;
1244 }
1245
1246 // Transitions from Owned
1247
1248 transition(O, L2_Replacement, I) {
1249 cc_dirtyReplacement;
1250 rr_deallocateL2CacheBlock;
1251 }
1252
1253 transition(O, Transient_GETX, I) {
1254 r_clearExclusive;
1255 dd_sendDataWithAllTokens;
1256 j_forwardTransientRequestToLocalSharers;
1257 m_popRequestQueue;
1258 }
1259
1260 transition(O, Persistent_GETX, I_L) {
1261 ee_sendDataWithAllTokens;
1262 l_popPersistentQueue;
1263 }
1264
1265 transition(O, Persistent_GETS, S_L) {
1266 ff_sendDataWithAllButOneTokens;
1267 l_popPersistentQueue;
1268 }
1269
1270 transition(O, Persistent_GETS_Last_Token, I_L) {
1271 fa_sendDataWithAllTokens;
1272 l_popPersistentQueue;
1273 }
1274
1275 transition(O, Transient_GETS) {
1276 // send multiple tokens
1277 r_clearExclusive;
1278 d_sendDataWithTokens;
1279 m_popRequestQueue;
1280 }
1281
1282 transition(O, Transient_GETS_Last_Token) {
1283 // WAIT FOR IT TO GO PERSISTENT
1284 r_clearExclusive;
1285 m_popRequestQueue;
1286 }
1287
1288 transition(O, Ack) {
1289 q_updateTokensFromResponse;
1290 n_popResponseQueue;
1291 }
1292
1293 transition(O, Ack_All_Tokens, M) {
1294 q_updateTokensFromResponse;
1295 n_popResponseQueue;
1296 }
1297
1298 transition(O, Data_Shared) {
1299 w_assertIncomingDataAndCacheDataMatch;
1300 q_updateTokensFromResponse;
1301 n_popResponseQueue;
1302 }
1303
1304
1305 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1306 w_assertIncomingDataAndCacheDataMatch;
1307 q_updateTokensFromResponse;
1308 h_updateFilterFromL1HintOrWB;
1309 n_popResponseQueue;
1310 }
1311
1312 transition(O, Data_All_Tokens, M) {
1313 w_assertIncomingDataAndCacheDataMatch;
1314 q_updateTokensFromResponse;
1315 n_popResponseQueue;
1316 }
1317
1318 transition(O, Writeback_All_Tokens, M) {
1319 w_assertIncomingDataAndCacheDataMatch;
1320 q_updateTokensFromResponse;
1321 h_updateFilterFromL1HintOrWB;
1322 n_popResponseQueue;
1323 }
1324
1325 transition(O, L1_GETS) {
1326 k_dataFromL2CacheToL1Requestor;
1327 r_markNewSharer;
1328 r_setMRU;
1329 o_popL1RequestQueue;
1330 }
1331
1332 transition(O, L1_GETS_Last_Token, I) {
1333 k_dataOwnerFromL2CacheToL1Requestor;
1334 r_markNewSharer;
1335 r_setMRU;
1336 o_popL1RequestQueue;
1337 }
1338
1339 transition(O, L1_GETX, I) {
1340 a_broadcastLocalRequest;
1341 k_dataAndAllTokensFromL2CacheToL1Requestor;
1342 r_markNewSharer;
1343 r_setMRU;
1344 uu_profileMiss;
1345 o_popL1RequestQueue;
1346 }
1347
1348 // Transitions from M
1349
1350 transition(M, L2_Replacement, I) {
1351 cc_dirtyReplacement;
1352 rr_deallocateL2CacheBlock;
1353 }
1354
1355 // MRM_DEBUG: Give up all tokens even for GETS? ???
1356 transition(M, {Transient_GETX, Transient_GETS}, I) {
1357 r_clearExclusive;
1358 dd_sendDataWithAllTokens;
1359 m_popRequestQueue;
1360 }
1361
1362 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1363 ee_sendDataWithAllTokens;
1364 l_popPersistentQueue;
1365 }
1366
1367
1368 transition(M, L1_GETS, O) {
1369 k_dataFromL2CacheToL1Requestor;
1370 r_markNewSharer;
1371 r_setMRU;
1372 o_popL1RequestQueue;
1373 }
1374
1375 transition(M, L1_GETX, I) {
1376 k_dataAndAllTokensFromL2CacheToL1Requestor;
1377 r_markNewSharer;
1378 r_setMRU;
1379 o_popL1RequestQueue;
1380 }
1381
1382
1383 //Transitions from locked states
1384
1385 transition({I_L, S_L}, Ack) {
1386 gg_bounceResponseToStarver;
1387 n_popResponseQueue;
1388 }
1389
1390 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1391 gg_bounceResponseToStarver;
1392 n_popResponseQueue;
1393 }
1394
1395 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1396 gg_bounceWBSharedToStarver;
1397 h_updateFilterFromL1HintOrWB;
1398 n_popResponseQueue;
1399 }
1400
1401 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1402 gg_bounceWBOwnedToStarver;
1403 h_updateFilterFromL1HintOrWB;
1404 n_popResponseQueue;
1405 }
1406
1407 transition(S_L, L2_Replacement, I) {
1408 c_cleanReplacement;
1409 rr_deallocateL2CacheBlock;
1410 }
1411
1412 transition(I_L, L2_Replacement, I) {
1413 rr_deallocateL2CacheBlock;
1414 }
1415
1416 transition(I_L, Own_Lock_or_Unlock, I) {
1417 l_popPersistentQueue;
1418 }
1419
1420 transition(S_L, Own_Lock_or_Unlock, S) {
1421 l_popPersistentQueue;
1422 }
1423
1424 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
1425 r_clearExclusive;
1426 m_popRequestQueue;
1427 }
1428
1429 transition(I_L, {L1_GETX, L1_GETS}) {
1430 a_broadcastLocalRequest;
1431 r_markNewSharer;
1432 uu_profileMiss;
1433 o_popL1RequestQueue;
1434 }
1435
1436 transition(S_L, L1_GETX, I_L) {
1437 a_broadcastLocalRequest;
1438 tt_sendLocalAckWithCollectedTokens;
1439 r_markNewSharer;
1440 r_setMRU;
1441 uu_profileMiss;
1442 o_popL1RequestQueue;
1443 }
1444
1445 transition(S_L, L1_GETS) {
1446 k_dataFromL2CacheToL1Requestor;
1447 r_markNewSharer;
1448 r_setMRU;
1449 o_popL1RequestQueue;
1450 }
1451
1452 transition(S_L, L1_GETS_Last_Token, I_L) {
1453 k_dataFromL2CacheToL1Requestor;
1454 r_markNewSharer;
1455 r_setMRU;
1456 o_popL1RequestQueue;
1457 }
1458
1459 transition(S_L, Persistent_GETX, I_L) {
1460 e_sendAckWithCollectedTokens;
1461 l_popPersistentQueue;
1462 }
1463
1464 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1465 l_popPersistentQueue;
1466 }
1467
1468 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1469 l_popPersistentQueue;
1470 }
1471 }