
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L2Cache, "Token protocol")
 : CacheMemory * L2cacheMemory,
   int N_tokens,
   int l2_request_latency = 5,
   int l2_response_latency = 5,
   bool filtering_enabled = true
{

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network

  // this L2 bank -> a local L1 || mod-directory
  MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false";
  // this L2 bank -> mod-directory
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false";
  // this L2 bank -> a local L1
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";


  // FROM the network to this local bank of L2 cache

  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false";
  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
  // mod-directory -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false";

  // STATES
  state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not Present";
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
    O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
    M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";

    // Locked states
    I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
    S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
  }
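
  // Token-count invariants, as checked by the assertions in setState() below:
  // a block in S holds at least one token, a block in O holds more than
  // max_tokens()/2 tokens (a count above max_tokens()/2 is how possession of
  // the owner token is represented here, as far as those assertions indicate),
  // a block in M holds all tokens, and blocks in NP and I_L hold none.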

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request; only the last token is here";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor; only the last token is here";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 with all the tokens";
    Writeback_Owned, desc="Received a writeback from L1 with the owner token";


    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Persistent_GETS_Last_Token, desc="Another processor has priority to read; only the last token is here";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different from memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }

  external_type(PerfectCacheMemory) {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
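
  // The persistent table tracks persistent (starvation-avoidance) requests for
  // each address.  As used below, findSmallest() is assumed to return the
  // highest-priority starving requestor for an address and typeOfSmallest()
  // the access type of that request; while an address is locked, responses and
  // spare tokens are forwarded to that requestor.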

  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return cache_entry;
  }

  int getTokens(Entry cache_entry) {
    if (is_valid(cache_entry)) {
      return cache_entry.Tokens;
    } else {
      return 0;
    }
  }

  State getState(Entry cache_entry, Address addr) {
    if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else if (persistentTable.isLocked(addr) == true) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  void setState(Entry cache_entry, Address addr, State state) {

    if (is_valid(cache_entry)) {
      // Make sure the token count is in range
      assert(cache_entry.Tokens >= 0);
      assert(cache_entry.Tokens <= max_tokens());
      assert(cache_entry.Tokens != (max_tokens() / 2));

      // Make sure we have no tokens in the locked state I_L
      if ((state == State:I_L) ) {
        assert(cache_entry.Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M ) {
        assert(cache_entry.Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(cache_entry.Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S ) {
        assert(cache_entry.Tokens > 0);
      }

      // You have more than half the tokens in O-like states
      if (state == State:O ) {
        assert(cache_entry.Tokens > (max_tokens() / 2));
      }

      cache_entry.CacheState := state;
    }
  }

  void removeSharer(Address addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].Sharers.count() > 0) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].exclusive == true) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr) == true) {
      localDirectory[addr].exclusive := false;
    }
  }

  GenericRequestType convertToGenericType(CoherenceRequestType type) {
    if(type == CoherenceRequestType:GETS) {
      return GenericRequestType:GETS;
    } else if(type == CoherenceRequestType:GETX) {
      return GenericRequestType:GETX;
    } else {
      DPRINTF(RubySlicc, "%s\n", type);
      error("invalid CoherenceRequestType");
    }
  }

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);



  // ** IN_PORTS **

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        Entry cache_entry := getCacheEntry(in_msg.Address);
        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {

          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
            if (getTokens(cache_entry) == 1 ||
                getTokens(cache_entry) == (max_tokens() / 2) + 1) {
              trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
                      cache_entry);
            } else {
              trigger(Event:Persistent_GETS, in_msg.Address, cache_entry);
            }
          } else {
            trigger(Event:Persistent_GETX, in_msg.Address, cache_entry);
          }
        }
        else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Address, cache_entry);
        }
      }
    }
  }


  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.Address);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Address, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
                    cache_entry);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Address);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Address, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Address, cache_entry);
          }
          else {
            trigger(Event:L1_GETS, in_msg.Address, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }


  // Response Network
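  // Incoming responses are classified by whether the tokens they carry would
  // bring this bank's count up to max_tokens() (the *_All_Tokens events) or
  // not; writebacks that find neither a matching entry nor a free way first
  // force an L2_Replacement of the victim chosen by cacheProbe().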
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Address);

        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Address, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Address, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Address, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cacheMemory.cacheProbe(in_msg.Address),
                      getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address, cache_entry);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
                     in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert( (getState(cache_entry, in_msg.Address) != State:NP)
                        && (getState(cache_entry, in_msg.Address) != State:I) );
                trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cacheMemory.cacheProbe(in_msg.Address),
                      getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address, cache_entry);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }


  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally

      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
      enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
        out_msg.Address := in_msg.Address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action


  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens >= 1);
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens - 1;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 1;
  }

  action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }

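  // The gg_* actions below implement "bouncing": while a persistent request has
  // this address locked, tokens and data arriving here are redirected to the
  // starving requestor chosen by the persistent table instead of being kept
  // locally.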
  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          assert(in_msg.Tokens < (max_tokens() / 2));
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }


  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
    }
  }

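  // With filtering_enabled, a first-attempt external request (RetryNum == 0) is
  // only forwarded to the local L1s if the local directory records a sharer;
  // retried requests are always forwarded.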
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
        //profile_filter_action(1);
        DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
                in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
          out_msg.Address := in_msg.Address;
          out_msg.Requestor := in_msg.Requestor;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }

  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens > 0);
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      cache_entry.Tokens := cache_entry.Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      // assert(cache_entry.Tokens == max_tokens());
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }


  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but a Writeback_All_Tokens
      // transition may not trigger that action.
      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cacheMemory.setMRU(address);
      }
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if ((cache_entry.Dirty == false) && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1requestNetwork_in, RequestMsg) {
      L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
                                          in_msg.AccessMode,
                                          in_msg.Prefetch);
    }
  }


  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }


  // Transitions from locked states

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}