// src/mem/protocol/MOESI_CMP_token-L2cache.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L2Cache, "Token protocol")
 : CacheMemory * L2cacheMemory,
   int N_tokens,
   int l2_request_latency = 5,
   int l2_response_latency = 5,
   bool filtering_enabled = true
{

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network

  // this L2 bank -> a local L1 || mod-directory
  MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false";
  // this L2 bank -> mod-directory
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false";
  // this L2 bank -> a local L1
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false";


  // FROM the network to this local bank of L2 cache

  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false";
  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
  // mod-directory -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false";
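
  // Explanatory aside (added comment, hedged): the virtual network numbering
  // above is what this file declares -- vnet 1 carries L1<->L2 requests,
  // vnet 2 global requests to/from the directory, vnet 3 persistent
  // (starvation-avoidance) requests, and vnet 4 all responses.  A minimal
  // sketch of a matching declaration, with the hypothetical buffer name
  // exampleToL2Cache:
  //
  //   MessageBuffer exampleToL2Cache, network="From", virtual_network="4",
  //       ordered="false";
  //
  // Only the persistent network is ordered, so that every bank observes
  // persistent activations and deactivations in the same order.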

  // STATES
  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, desc="Not Present";
    I, desc="Idle";
    S, desc="Shared, not present in any local L1s";
    O, desc="Owned, not present in any L1s";
    M, desc="Modified, not present in any L1s";

    // Locked states
    I_L, "I^L", desc="Invalid, Locked";
    S_L, "S^L", desc="Shared, Locked";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request when only the last token (or just the owner token) is held";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor when only the last token is held";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 that completes the token set";
    Writeback_Owned, desc="Received a writeback from L1 that includes the owner token";


    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Persistent_GETS_Last_Token, desc="Another processor has priority to read; only the last token (or just the owner token) is held";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }

  external_type(PerfectCacheMemory) {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
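
  // Explanatory aside (added comment, hedged): the PersistentTable tracks
  // outstanding persistent, i.e. starvation-avoidance, requests per address.
  // A sketch of how the persistent in_port below drives it -- illustrative,
  // not a definitive description of the table's implementation:
  //
  //   persistentTable.persistentRequestLock(addr, requestor, AccessType:Write);
  //   if (persistentTable.isLocked(addr)) {
  //     // findSmallest(addr) returns the highest-priority starver and
  //     // typeOfSmallest(addr) says whether it wants to read or write.
  //   }
  //   persistentTable.persistentRequestUnlock(addr, requestor);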

  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return cache_entry;
  }

  int getTokens(Entry cache_entry) {
    if (is_valid(cache_entry)) {
      return cache_entry.Tokens;
    } else {
      return 0;
    }
  }

  State getState(Entry cache_entry, Address addr) {
    if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else if (persistentTable.isLocked(addr) == true) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  void setState(Entry cache_entry, Address addr, State state) {

    if (is_valid(cache_entry)) {
      // Make sure the token count is in range
      assert(cache_entry.Tokens >= 0);
      assert(cache_entry.Tokens <= max_tokens());
      assert(cache_entry.Tokens != (max_tokens() / 2));

      // Make sure we have no tokens in the locked state I^L
      if ((state == State:I_L) ) {
        assert(cache_entry.Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M ) {
        assert(cache_entry.Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(cache_entry.Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S ) {
        assert(cache_entry.Tokens > 0);
      }

      // You have more than half the tokens in O-like states
      if (state == State:O ) {
        assert(cache_entry.Tokens > (max_tokens() / 2));
      }

      cache_entry.CacheState := state;

      // Set permission
      if (state == State:I) {
        cache_entry.changePermission(AccessPermission:Invalid);
      } else if (state == State:S || state == State:O ) {
        cache_entry.changePermission(AccessPermission:Read_Only);
      } else if (state == State:M ) {
        cache_entry.changePermission(AccessPermission:Read_Write);
      } else {
        cache_entry.changePermission(AccessPermission:Invalid);
      }
    }
  }
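
  // Explanatory aside (added comment, hedged): the token counts checked above
  // appear to encode the owner token as (max_tokens() / 2) + 1, with every
  // other token worth 1.  Assuming, purely for illustration, that max_tokens()
  // returns 1024 and a block has 511 plain tokens plus the owner token:
  //
  //   plain token  -> count += 1
  //   owner token  -> count += (1024 / 2) + 1 = 513
  //   all tokens   -> 511 + 513 = 1024 = max_tokens()
  //
  // which is why O-like states require Tokens > max_tokens() / 2, a count of
  // exactly max_tokens() / 2 can never occur, and "last token" is detected
  // below as either 1 (a single plain token) or max_tokens() / 2 + 1 (only
  // the owner token).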

  void removeSharer(Address addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].Sharers.count() > 0) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].exclusive == true) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr) == true) {
      localDirectory[addr].exclusive := false;
    }
  }
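
  // Explanatory aside (added comment, hedged): localDirectory acts as a
  // coarse filter over the local L1s rather than an exact directory.  For
  // illustration, with hypothetical node IDs:
  //
  //   setNewWriter(addr, 3);   // Sharers = {3}, exclusive = true
  //   addNewSharer(addr, 5);   // Sharers = {3, 5}
  //   removeSharer(addr, 3);   // Sharers = {5}
  //   removeSharer(addr, 5);   // entry deallocated
  //
  // sharersExist() and exclusiveExists() only gate whether external transient
  // requests are forwarded locally (see
  // j_forwardTransientRequestToLocalSharers), so an imprecise entry should
  // cost extra retries rather than correctness.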

  GenericRequestType convertToGenericType(CoherenceRequestType type) {
    if(type == CoherenceRequestType:GETS) {
      return GenericRequestType:GETS;
    } else if(type == CoherenceRequestType:GETX) {
      return GenericRequestType:GETX;
    } else {
      DPRINTF(RubySlicc, "%s\n", type);
      error("invalid CoherenceRequestType");
    }
  }

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);



  // ** IN_PORTS **

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        Entry cache_entry := getCacheEntry(in_msg.Address);
        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {

          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
            if (getTokens(cache_entry) == 1 ||
                getTokens(cache_entry) == (max_tokens() / 2) + 1) {
              trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
                      cache_entry);
            } else {
              trigger(Event:Persistent_GETS, in_msg.Address, cache_entry);
            }
          } else {
            trigger(Event:Persistent_GETX, in_msg.Address, cache_entry);
          }
        }
        else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Address, cache_entry);
        }
      }
    }
  }
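
  // Explanatory aside (added comment, hedged): an illustrative sequence of
  // how the port above reacts once a persistent request activates, with a
  // hypothetical requestor L1-7:
  //
  //   GETX_PERSISTENT from L1-7        -> persistentRequestLock(addr, L1-7, Write)
  //   table now locked                 -> trigger Persistent_GETX
  //   (transitions below push data/tokens to persistentTable.findSmallest(addr))
  //   DEACTIVATE_PERSISTENT from L1-7  -> persistentRequestUnlock(addr, L1-7)
  //   table no longer locked           -> trigger Own_Lock_or_Unlock
  //
  // Persistent_GETS_Last_Token is distinguished so that a bank holding only
  // the owner token can hand everything to the starver
  // (fa_sendDataWithAllTokens) instead of trying to keep one token for itself.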


  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.Address);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Address, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
                    cache_entry);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Address);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Address, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Address, cache_entry);
          }
          else {
            trigger(Event:L1_GETS, in_msg.Address, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }
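
  // Explanatory aside (added comment, hedged): note the asymmetry between the
  // two request ports above.  The global port only treats a count of exactly
  // 1 as the last token, while the L1 port also treats holding nothing but
  // the owner token as the last-token case.  Assuming, for illustration,
  // max_tokens() == 1024:
  //
  //   getTokens(cache_entry) == 1    -> Transient_GETS_Last_Token and L1_GETS_Last_Token
  //   getTokens(cache_entry) == 513  -> Transient_GETS, but L1_GETS_Last_Token
  //
  // so a local L1 GetS may take the owner token from this bank (see
  // k_dataOwnerFromL2CacheToL1Requestor), which drops the bank to I.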


  // Response Network
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Address);

        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Address, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Address, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Address, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cacheMemory.cacheProbe(in_msg.Address),
                      getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address, cache_entry);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
                     in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert( (getState(cache_entry, in_msg.Address) != State:NP)
                        && (getState(cache_entry, in_msg.Address) != State:I) );
                trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cacheMemory.cacheProbe(in_msg.Address),
                      getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address, cache_entry);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }
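
  // Explanatory aside (added comment, hedged): the response port above splits
  // every incoming message into two cases, depending on whether the tokens it
  // carries complete the set.  A numeric illustration, again assuming
  // max_tokens() == 1024 and a bank currently holding 511 tokens:
  //
  //   DATA_OWNER carrying 513 tokens  -> 511 + 513 == 1024 -> Data_All_Tokens
  //   DATA_SHARED carrying 1 token    -> 511 + 1   != 1024 -> Data_Shared
  //
  // Writebacks additionally need either a free way or an existing entry;
  // otherwise the port triggers L2_Replacement on the victim returned by
  // L2cacheMemory.cacheProbe() before the writeback can be accepted.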


  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally

      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
      enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
        out_msg.Address := in_msg.Address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action


  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    cache_entry.Tokens := 0;
  }
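
  // Explanatory aside (added comment, hedged): c_cleanReplacement and
  // cc_dirtyReplacement above are the two victim paths.  A clean victim only
  // returns its tokens as an ACK, while an owned victim also carries data
  // (DATA_OWNER when dirty, ACK_OWNER otherwise).  Sketch of the message a
  // dirty owned victim produces, taken from the action above:
  //
  //   out_msg.Type        := CoherenceResponseType:DATA_OWNER;
  //   out_msg.Tokens      := cache_entry.Tokens;   // includes the owner token
  //   out_msg.DataBlk     := cache_entry.DataBlk;
  //   out_msg.MessageSize := MessageSizeType:Writeback_Data;
  //
  // Either way the directory is the destination, so no tokens are lost on an
  // L2 eviction.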

  action(d_sendDataWithTokens, "d", desc="Send data and tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens >= 1);
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens - 1;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 1;
  }

  action(fa_sendDataWithAllTokens, "fa", desc="Send data and all our tokens to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }



  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          assert(in_msg.Tokens < (max_tokens() / 2));
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }


  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
    }
  }

  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
        //profile_filter_action(1);
        DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
                in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency) {
          out_msg.Address := in_msg.Address;
          out_msg.Requestor := in_msg.Requestor;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }
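
  // Explanatory aside (added comment, hedged): with filtering_enabled, a
  // first-try external request is dropped here when the local directory
  // records no sharers, on the assumption that no local L1 could supply
  // tokens or data anyway.  Illustration of the decision above:
  //
  //   in_msg.RetryNum == 0, sharersExist(addr) == false -> filtered (DPRINTF only)
  //   in_msg.RetryNum == 1, sharersExist(addr) == false -> broadcast to local L1s
  //
  // Retries and, ultimately, persistent requests are the backstop if the
  // filter guesses wrong, so filtering trades extra latency for fewer local
  // broadcasts.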

  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens > 0);
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      cache_entry.Tokens := cache_entry.Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      // assert(cache_entry.Tokens == max_tokens());
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }


  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }
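
  // Explanatory aside (added comment, hedged): the Dirty update above lives
  // here rather than in u_writeDataToCache because Writeback_All_Tokens
  // transitions run q_updateTokensFromResponse without rewriting the data.
  // For example, when an L1 writes back a dirty owned line whose data already
  // matches this bank's copy, the transition skips the data write but must
  // still absorb the tokens and the dirty bit:
  //
  //   cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
  //   cache_entry.Dirty  := true;   // in_msg is WB_OWNED and Dirty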

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cacheMemory.setMRU(address);
      }
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if ((cache_entry.Dirty == false) && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1requestNetwork_in, RequestMsg) {
      L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
                                          in_msg.AccessMode,
                                          in_msg.Prefetch);
    }
  }


  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }
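
  // Explanatory aside (added comment, hedged): the transition above
  // deliberately answers a transient GetS with nothing when this owner bank
  // is down to its last transferable token; "WAIT FOR IT TO GO PERSISTENT"
  // means the requester is expected to time out, retry, and eventually
  // escalate to a persistent request, which the O-state Persistent_GETS /
  // Persistent_GETS_Last_Token transitions above then satisfy.  Illustrative
  // sequence, taken from the transitions in this file:
  //
  //   Transient_GETS_Last_Token  -> r_clearExclusive; m_popRequestQueue  (no data sent)
  //   Persistent_GETS_Last_Token -> fa_sendDataWithAllTokens to the starver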

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }


  //Transitions from locked states

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}
1488 }