x86: changes to apic, keyboard
[gem5.git] / src / mem / protocol / MOESI_CMP_token-L2cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 *
33 */
34
// SLICC controller specification for the L2 cache bank of the MOESI CMP
// token-coherence protocol.  Parameters: L2cacheMemory is the backing tag/
// data store, N_tokens is the token batch size handed to sharers,
// l2_request/response_latency are enqueue latencies, and filtering_enabled
// gates forwarding of external transient requests to local L1s.
35 machine(L2Cache, "Token protocol")
36 : CacheMemory * L2cacheMemory,
37 int N_tokens,
38 Cycles l2_request_latency = 5,
39 Cycles l2_response_latency = 5,
40 bool filtering_enabled = true
41 {
42 
43 // L2 BANK QUEUES
44 // From local bank of L2 cache TO the network
45 
46 // this L2 bank -> a local L1 || mod-directory
47 MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
48 // this L2 bank -> mod-directory
49 MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
50 // this L2 bank -> a local L1
51 MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
52 
53 
54 // FROM the network to this local bank of L2 cache
55 
56 // a local L1 || mod-directory -> this L2 bank
57 MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
// NOTE: the persistent (starvation-avoidance) network is the only ordered
// virtual network used by this controller.
58 MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
59 // mod-directory -> this L2 bank
60 MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
61 // a local L1 -> this L2 bank
62 MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
63 
64 // STATES
// Controller states.  The *_L states mean a persistent request from another
// node currently locks this address (see persistentTable); the default state
// for unallocated blocks is I.
65 state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
66 // Base states
67 NP, AccessPermission:Invalid, desc="Not Present";
68 I, AccessPermission:Invalid, desc="Idle";
69 S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
70 O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
71 M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
72 
73 // Locked states
74 I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
75 S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
76 }
77 
// Events triggered by incoming messages or by this controller itself.
// NOTE(review): several *_Last_Token events reuse the base event's desc
// string; the distinguishing factor is the token count observed in the
// in_port trigger logic, not the desc text.
78 // EVENTS
79 enumeration(Event, desc="Cache events") {
80 
81 // Requests
82 L1_GETS, desc="local L1 GETS request";
83 L1_GETS_Last_Token, desc="local L1 GETS request";
84 L1_GETX, desc="local L1 GETX request";
85 L1_INV, desc="L1 no longer has tokens";
86 Transient_GETX, desc="A GetX from another processor";
87 Transient_GETS, desc="A GetS from another processor";
88 Transient_GETS_Last_Token, desc="A GetS from another processor";
89 
90 // events initiated by this L2
91 L2_Replacement, desc="L2 Replacement", format="!r";
92 
93 // events of external L2 responses
94 
95 // Responses
96 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
97 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
98 Writeback_All_Tokens, desc="Received a writeback from L1";
99 Writeback_Owned, desc="Received a writeback from L1";
100 
101 
102 Data_Shared, desc="Received a data message, we are now a sharer";
103 Data_Owner, desc="Received a data message, we are now the owner";
104 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
105 Ack, desc="Received an ack message";
106 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
107 
108 // Lock/Unlock
109 Persistent_GETX, desc="Another processor has priority to read/write";
110 Persistent_GETS, desc="Another processor has priority to read";
111 Persistent_GETS_Last_Token, desc="Another processor has priority to read";
112 Own_Lock_or_Unlock, desc="This processor now has priority";
113 }
114
115 // TYPES
116
117 // CacheEntry
// Per-block L2 cache entry: protocol state, dirty bit, token count for the
// token-coherence scheme, and the data block itself.
118 structure(Entry, desc="...", interface="AbstractCacheEntry") {
119 State CacheState, desc="cache state";
120 bool Dirty, desc="Is the data dirty (different than memory)?";
121 int Tokens, desc="The number of tokens we're holding for the line";
122 DataBlk DataBlk, desc="data for the block";
// NOTE(review): line above reads "DataBlock DataBlk" in the upstream file;
// preserved verbatim from this copy below.
123 }
124 
// Local-L1 directory entry used to filter/forward requests: which local L1s
// share the block, and whether a local L1 likely holds it exclusively.
125 structure(DirEntry, desc="...") {
126 Set Sharers, desc="Set of the internal processors that want the block in shared state";
127 bool exclusive, default="false", desc="if local exclusive is likely";
128 }
129 
// Ideal (no-conflict) tag store backing the local L1 directory.
130 structure(PerfectCacheMemory, external="yes") {
131 void allocate(Address);
132 void deallocate(Address);
133 DirEntry lookup(Address);
134 bool isTagPresent(Address);
135 }
136 
// Table of outstanding persistent (starvation-avoidance) requests; the
// smallest requester ID has priority for a locked address.
137 structure(PersistentTable, external="yes") {
138 void persistentRequestLock(Address, MachineID, AccessType);
139 void persistentRequestUnlock(Address, MachineID);
140 MachineID findSmallest(Address);
141 AccessType typeOfSmallest(Address);
142 void markEntries(Address);
143 bool isLocked(Address);
144 int countStarvingForAddress(Address);
145 int countReadStarvingForAddress(Address);
146 }
147 
148 PersistentTable persistentTable;
149 PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
150 
151 void set_cache_entry(AbstractCacheEntry b);
152 void unset_cache_entry();
153 
// Look up the L2 entry for an address; returns an invalid pointer when the
// block is not cached here.
154 Entry getCacheEntry(Address address), return_by_pointer="yes" {
155 Entry cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
156 return cache_entry;
157 }
158 
// Accessor used by the Ruby framework; assumes the block is present.
159 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
160 return getCacheEntry(addr).DataBlk;
161 }
162
// Number of tokens this L2 holds for the block; a block with no valid
// entry contributes zero tokens.
int getTokens(Entry cache_entry) {
  if (is_valid(cache_entry) == false) {
    return 0;
  }
  return cache_entry.Tokens;
}
170
// Current protocol state for an address: the entry's state if cached;
// otherwise I_L when a persistent request locks the address, else NP.
171 State getState(Entry cache_entry, Address addr) {
172 if (is_valid(cache_entry)) {
173 return cache_entry.CacheState;
174 } else if (persistentTable.isLocked(addr) == true) {
175 return State:I_L;
176 } else {
177 return State:NP;
178 }
179 }
180 
// Update the entry's state, asserting the protocol's token-count
// invariants for the target state.  Note max_tokens()/2 appears to be the
// reserved "owner token" boundary — a count of exactly half is never legal.
181 void setState(Entry cache_entry, Address addr, State state) {
182 
183 if (is_valid(cache_entry)) {
184 // Make sure the token count is in range
185 assert(cache_entry.Tokens >= 0);
186 assert(cache_entry.Tokens <= max_tokens());
187 assert(cache_entry.Tokens != (max_tokens() / 2));
188 
189 // Make sure we have no tokens in L
190 if ((state == State:I_L) ) {
191 assert(cache_entry.Tokens == 0);
192 }
193 
194 // in M and E you have all the tokens
195 if (state == State:M ) {
196 assert(cache_entry.Tokens == max_tokens());
197 }
198 
199 // in NP you have no tokens
200 if (state == State:NP) {
201 assert(cache_entry.Tokens == 0);
202 }
203 
204 // You have at least one token in S-like states
205 if (state == State:S ) {
206 assert(cache_entry.Tokens > 0);
207 }
208 
209 // You have at least half the token in O-like states
210 if (state == State:O ) {
211 assert(cache_entry.Tokens > (max_tokens() / 2));
212 }
213 
214 cache_entry.CacheState := state;
215 }
216 }
217
// Map the block's current state to a Ruby access permission; blocks with
// no allocated entry are NotPresent.
AccessPermission getAccessPermission(Address addr) {
  Entry cache_entry := getCacheEntry(addr);
  if (is_valid(cache_entry) == false) {
    return AccessPermission:NotPresent;
  }
  return L2Cache_State_to_permission(cache_entry.CacheState);
}
226
// Keep the entry's permission in sync with a state change; no-op when the
// block is not cached.
227 void setAccessPermission(Entry cache_entry, Address addr, State state) {
228 if (is_valid(cache_entry)) {
229 cache_entry.changePermission(L2Cache_State_to_permission(state));
230 }
231 }
232 
// Drop an L1 from the local sharer set for addr; the directory entry is
// freed once the last sharer is removed.
233 void removeSharer(Address addr, NodeID id) {
234 
235 if (localDirectory.isTagPresent(addr)) {
236 localDirectory[addr].Sharers.remove(id);
237 if (localDirectory[addr].Sharers.count() == 0) {
238 localDirectory.deallocate(addr);
239 }
240 }
241 }
242
// True if the local directory records at least one local L1 sharer
// for this address.
bool sharersExist(Address addr) {
  if (localDirectory.isTagPresent(addr) == false) {
    return false;
  }
  return (localDirectory[addr].Sharers.count() > 0);
}
256
// True if the local directory marks this address as likely held
// exclusively by a local L1.
bool exclusiveExists(Address addr) {
  if (localDirectory.isTagPresent(addr) == false) {
    return false;
  }
  return localDirectory[addr].exclusive;
}
270
271 // assumes that caller will check to make sure tag is present
272 Set getSharers(Address addr) {
273 return localDirectory[addr].Sharers;
274 }
275 
// Record a local L1 as the sole (exclusive) writer: reset the sharer set
// to just this node and set the exclusive hint.
276 void setNewWriter(Address addr, NodeID id) {
277 if (localDirectory.isTagPresent(addr) == false) {
278 localDirectory.allocate(addr);
279 }
280 localDirectory[addr].Sharers.clear();
281 localDirectory[addr].Sharers.add(id);
282 localDirectory[addr].exclusive := true;
283 }
284 
// Add a local L1 to the sharer set, allocating the directory entry on
// first use.  Deliberately leaves the exclusive hint untouched.
285 void addNewSharer(Address addr, NodeID id) {
286 if (localDirectory.isTagPresent(addr) == false) {
287 localDirectory.allocate(addr);
288 }
289 localDirectory[addr].Sharers.add(id);
290 // localDirectory[addr].exclusive := false;
291 }
292 
// Clear the exclusive hint for addr if the directory tracks it.
293 void clearExclusiveBitIfExists(Address addr) {
294 if (localDirectory.isTagPresent(addr) == true) {
295 localDirectory[addr].exclusive := false;
296 }
297 }
298 
// Translate a coherence request type into the generic profiling type;
// only GETS/GETX are expected here.
299 GenericRequestType convertToGenericType(CoherenceRequestType type) {
300 if(type == CoherenceRequestType:GETS) {
301 return GenericRequestType:GETS;
302 } else if(type == CoherenceRequestType:GETX) {
303 return GenericRequestType:GETX;
304 } else {
305 DPRINTF(RubySlicc, "%s\n", type);
306 error("invalid CoherenceRequestType");
307 }
308 }
309 
310 // ** OUT_PORTS **
311 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
312 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
313 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
314
315
316
317 // ** IN_PORTS **
318
319 // Persistent Network
// Persistent (starvation-avoidance) network: first update the persistent
// table from the message, then trigger an event based on whether the
// address is now locked by a remote node and, for reads, whether we hold
// the last usable token (one bare token, or the owner share of
// max_tokens()/2 + 1 — TODO confirm owner-token convention).
320 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
321 if (persistentNetwork_in.isReady()) {
322 peek(persistentNetwork_in, PersistentMsg) {
323 assert(in_msg.Destination.isElement(machineID));
324 
325 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
326 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
327 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
328 persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
329 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
330 persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
331 } else {
332 error("Unexpected message");
333 }
334 
335 Entry cache_entry := getCacheEntry(in_msg.Address);
336 // React to the message based on the current state of the table
337 if (persistentTable.isLocked(in_msg.Address)) {
338 
339 if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
340 if (getTokens(cache_entry) == 1 ||
341 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
342 trigger(Event:Persistent_GETS_Last_Token, in_msg.Address,
343 cache_entry);
344 } else {
345 trigger(Event:Persistent_GETS, in_msg.Address, cache_entry);
346 }
347 } else {
348 trigger(Event:Persistent_GETX, in_msg.Address, cache_entry);
349 }
350 }
351 else {
// Address no longer locked (or we hold priority): unlock path.
352 trigger(Event:Own_Lock_or_Unlock, in_msg.Address, cache_entry);
353 }
354 }
355 }
356 }
357
358
359 // Request Network
// Transient (global) requests from other chips/directory: GETX, or GETS
// with a last-token special case when we hold exactly one token.
360 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
361 if (requestNetwork_in.isReady()) {
362 peek(requestNetwork_in, RequestMsg) {
363 assert(in_msg.Destination.isElement(machineID));
364 
365 Entry cache_entry := getCacheEntry(in_msg.Address);
366 if (in_msg.Type == CoherenceRequestType:GETX) {
367 trigger(Event:Transient_GETX, in_msg.Address, cache_entry);
368 } else if (in_msg.Type == CoherenceRequestType:GETS) {
369 if (getTokens(cache_entry) == 1) {
370 trigger(Event:Transient_GETS_Last_Token, in_msg.Address,
371 cache_entry);
372 }
373 else {
374 trigger(Event:Transient_GETS, in_msg.Address, cache_entry);
375 }
376 } else {
377 error("Unexpected message");
378 }
379 }
380 }
381 }
382 
// Requests from local L1s.  GETS is a "last token" when we hold either a
// single token or the owner share (max_tokens()/2 + 1 — TODO confirm).
383 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
384 if (L1requestNetwork_in.isReady()) {
385 peek(L1requestNetwork_in, RequestMsg) {
386 assert(in_msg.Destination.isElement(machineID));
387 Entry cache_entry := getCacheEntry(in_msg.Address);
388 if (in_msg.Type == CoherenceRequestType:GETX) {
389 trigger(Event:L1_GETX, in_msg.Address, cache_entry);
390 } else if (in_msg.Type == CoherenceRequestType:GETS) {
391 if (getTokens(cache_entry) == 1 ||
392 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
393 trigger(Event:L1_GETS_Last_Token, in_msg.Address, cache_entry);
394 }
395 else {
396 trigger(Event:L1_GETS, in_msg.Address, cache_entry);
397 }
398 } else {
399 error("Unexpected message");
400 }
401 }
402 }
403 }
404
405
406 // Response Network
// Response network.  The outer branch splits on whether this message's
// tokens complete the full set (max_tokens()); the "*_All_Tokens" events
// fire when they do.  Writebacks additionally require either a free way
// or an already-present entry, otherwise a victim is chosen and
// L2_Replacement is triggered for it first.
407 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
408 if (responseNetwork_in.isReady()) {
409 peek(responseNetwork_in, ResponseMsg) {
410 assert(in_msg.Destination.isElement(machineID));
411 Entry cache_entry := getCacheEntry(in_msg.Address);
412 
413 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
414 if (in_msg.Type == CoherenceResponseType:ACK) {
415 assert(in_msg.Tokens < (max_tokens() / 2));
416 trigger(Event:Ack, in_msg.Address, cache_entry);
417 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
418 trigger(Event:Data_Owner, in_msg.Address, cache_entry);
419 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
420 trigger(Event:Data_Shared, in_msg.Address, cache_entry);
421 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
422 in_msg.Type == CoherenceResponseType:WB_OWNED ||
423 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
424 
425 if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
426 
427 // either room is available or the block is already present
428 
429 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
430 assert(in_msg.Dirty == false);
431 trigger(Event:Writeback_Tokens, in_msg.Address, cache_entry);
432 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
433 assert(in_msg.Dirty == false);
434 trigger(Event:Writeback_Shared_Data, in_msg.Address, cache_entry);
435 }
436 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
437 //assert(in_msg.Dirty == false);
438 trigger(Event:Writeback_Owned, in_msg.Address, cache_entry);
439 }
440 }
441 else {
// No room: evict the probe-selected victim before accepting the WB.
442 trigger(Event:L2_Replacement,
443 L2cacheMemory.cacheProbe(in_msg.Address),
444 getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
445 }
446 } else if (in_msg.Type == CoherenceResponseType:INV) {
447 trigger(Event:L1_INV, in_msg.Address, cache_entry);
448 } else {
449 error("Unexpected message");
450 }
451 } else {
// This message completes the token set for the block.
452 if (in_msg.Type == CoherenceResponseType:ACK) {
453 assert(in_msg.Tokens < (max_tokens() / 2));
454 trigger(Event:Ack_All_Tokens, in_msg.Address, cache_entry);
455 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
456 in_msg.Type == CoherenceResponseType:DATA_SHARED) {
457 trigger(Event:Data_All_Tokens, in_msg.Address, cache_entry);
458 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
459 in_msg.Type == CoherenceResponseType:WB_OWNED ||
460 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
461 if (L2cacheMemory.cacheAvail(in_msg.Address) || is_valid(cache_entry)) {
462 
463 // either room is available or the block is already present
464 
465 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
466 assert(in_msg.Dirty == false);
467 assert( (getState(cache_entry, in_msg.Address) != State:NP)
468 && (getState(cache_entry, in_msg.Address) != State:I) );
469 trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
470 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
471 assert(in_msg.Dirty == false);
472 trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
473 }
474 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
475 trigger(Event:Writeback_All_Tokens, in_msg.Address, cache_entry);
476 }
477 }
478 else {
479 trigger(Event:L2_Replacement,
480 L2cacheMemory.cacheProbe(in_msg.Address),
481 getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address)));
482 }
483 } else if (in_msg.Type == CoherenceResponseType:INV) {
484 trigger(Event:L1_INV, in_msg.Address, cache_entry);
485 } else {
486 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
487 error("Unexpected message");
488 }
489 }
490 }
491 }
492 }
493
494
495 // ACTIONS
496
// Forward an L1's request toward the directory.  The commented-out code
// shows an earlier filtered-broadcast scheme; the live path always sends
// a single Request_Control message to the home directory.
497 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
498 
499 peek(L1requestNetwork_in, RequestMsg) {
500 
501 // if this is a retry or no local sharers, broadcast normally
502 
503 // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
504 enqueue(globalRequestNetwork_out, RequestMsg, latency=l2_request_latency) {
505 out_msg.Address := in_msg.Address;
506 out_msg.Type := in_msg.Type;
507 out_msg.Requestor := in_msg.Requestor;
508 out_msg.RetryNum := in_msg.RetryNum;
509 
510 //
511 // If a statically shared L2 cache, then no other L2 caches can
512 // store the block
513 //
514 //out_msg.Destination.broadcast(MachineType:L2Cache);
515 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
516 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
517 
518 out_msg.Destination.add(map_Address_to_Directory(address));
519 out_msg.MessageSize := MessageSizeType:Request_Control;
520 out_msg.AccessMode := in_msg.AccessMode;
521 out_msg.Prefetch := in_msg.Prefetch;
522 } //enqueue
523 // } // if
524 
525 //profile_filter_action(0);
526 } // peek
527 } //action
528 
529 
// Redirect an incoming response (tokens and possibly data) to the home
// directory instead of keeping it here.
530 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
531 peek(responseNetwork_in, ResponseMsg) {
532 // FIXME, should use a 3rd vnet
533 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
534 out_msg.Address := address;
535 out_msg.Type := in_msg.Type;
536 out_msg.Sender := machineID;
537 out_msg.Destination.add(map_Address_to_Directory(address));
538 out_msg.Tokens := in_msg.Tokens;
539 out_msg.MessageSize := in_msg.MessageSize;
540 out_msg.DataBlk := in_msg.DataBlk;
541 out_msg.Dirty := in_msg.Dirty;
542 }
543 }
544 }
545
// Eviction of a clean block: return any held tokens to the directory as a
// control-only ACK; no data is shipped.
546 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
547 assert(is_valid(cache_entry));
548 if (cache_entry.Tokens > 0) {
549 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
550 out_msg.Address := address;
551 out_msg.Type := CoherenceResponseType:ACK;
552 out_msg.Sender := machineID;
553 out_msg.Destination.add(map_Address_to_Directory(address));
554 out_msg.Tokens := cache_entry.Tokens;
555 out_msg.MessageSize := MessageSizeType:Writeback_Control;
556 }
557 cache_entry.Tokens := 0;
558 }
559 }
560 
// Eviction of an owned block: send tokens plus data (DATA_OWNER if dirty,
// ACK_OWNER control message if clean) to the directory.
561 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
562 assert(is_valid(cache_entry));
563 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
564 out_msg.Address := address;
565 out_msg.Sender := machineID;
566 out_msg.Destination.add(map_Address_to_Directory(address));
567 out_msg.Tokens := cache_entry.Tokens;
568 out_msg.DataBlk := cache_entry.DataBlk;
569 out_msg.Dirty := cache_entry.Dirty;
570 
571 if (cache_entry.Dirty) {
572 out_msg.MessageSize := MessageSizeType:Writeback_Data;
573 out_msg.Type := CoherenceResponseType:DATA_OWNER;
574 } else {
575 out_msg.MessageSize := MessageSizeType:Writeback_Control;
576 out_msg.Type := CoherenceResponseType:ACK_OWNER;
577 }
578 }
579 cache_entry.Tokens := 0;
580 }
581
// Satisfy a transient GETS: share data with either a batch of N_tokens
// (when we hold the owner share plus a surplus) or a single token.
582 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
583 peek(requestNetwork_in, RequestMsg) {
584 assert(is_valid(cache_entry));
585 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
586 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
587 out_msg.Address := address;
588 out_msg.Type := CoherenceResponseType:DATA_SHARED;
589 out_msg.Sender := machineID;
590 out_msg.Destination.add(in_msg.Requestor);
591 out_msg.Tokens := N_tokens;
592 out_msg.DataBlk := cache_entry.DataBlk;
593 out_msg.Dirty := false;
594 out_msg.MessageSize := MessageSizeType:Response_Data;
595 }
596 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
597 }
598 else {
599 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
600 out_msg.Address := address;
601 out_msg.Type := CoherenceResponseType:DATA_SHARED;
602 out_msg.Sender := machineID;
603 out_msg.Destination.add(in_msg.Requestor);
604 out_msg.Tokens := 1;
605 out_msg.DataBlk := cache_entry.DataBlk;
606 out_msg.Dirty := false;
607 out_msg.MessageSize := MessageSizeType:Response_Data;
608 }
609 cache_entry.Tokens := cache_entry.Tokens - 1;
610 }
611 }
612 }
613 
// Satisfy a transient GETX: hand over ownership — data plus every token
// we hold — to the requestor.
614 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
615 assert(is_valid(cache_entry));
616 peek(requestNetwork_in, RequestMsg) {
617 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
618 out_msg.Address := address;
619 out_msg.Type := CoherenceResponseType:DATA_OWNER;
620 out_msg.Sender := machineID;
621 out_msg.Destination.add(in_msg.Requestor);
622 assert(cache_entry.Tokens >= 1);
623 out_msg.Tokens := cache_entry.Tokens;
624 out_msg.DataBlk := cache_entry.DataBlk;
625 out_msg.Dirty := cache_entry.Dirty;
626 out_msg.MessageSize := MessageSizeType:Response_Data;
627 }
628 }
629 cache_entry.Tokens := 0;
630 }
631
// Persistent-request service: give every token we have (no data) to the
// current starver; no-op when we hold none.
632 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
633 assert(is_valid(cache_entry));
634 if (cache_entry.Tokens > 0) {
635 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
636 out_msg.Address := address;
637 out_msg.Type := CoherenceResponseType:ACK;
638 out_msg.Sender := machineID;
639 out_msg.Destination.add(persistentTable.findSmallest(address));
640 assert(cache_entry.Tokens >= 1);
641 out_msg.Tokens := cache_entry.Tokens;
642 out_msg.MessageSize := MessageSizeType:Response_Control;
643 }
644 }
645 cache_entry.Tokens := 0;
646 }
647 
// Persistent-request service: transfer ownership (data + all tokens) to
// the highest-priority starver.
648 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
649 assert(is_valid(cache_entry));
650 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
651 out_msg.Address := address;
652 out_msg.Type := CoherenceResponseType:DATA_OWNER;
653 out_msg.Sender := machineID;
654 out_msg.Destination.add(persistentTable.findSmallest(address));
655 assert(cache_entry.Tokens >= 1);
656 out_msg.Tokens := cache_entry.Tokens;
657 out_msg.DataBlk := cache_entry.DataBlk;
658 out_msg.Dirty := cache_entry.Dirty;
659 out_msg.MessageSize := MessageSizeType:Response_Data;
660 }
661 cache_entry.Tokens := 0;
662 }
663 
// Persistent read service while keeping read permission: send all tokens
// but one (no data) to the starver, retaining a single token locally.
664 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
665 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
666 assert(is_valid(cache_entry));
667 assert(cache_entry.Tokens > 0);
668 if (cache_entry.Tokens > 1) {
669 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
670 out_msg.Address := address;
671 out_msg.Type := CoherenceResponseType:ACK;
672 out_msg.Sender := machineID;
673 out_msg.Destination.add(persistentTable.findSmallest(address));
674 assert(cache_entry.Tokens >= 1);
675 out_msg.Tokens := cache_entry.Tokens - 1;
676 out_msg.MessageSize := MessageSizeType:Response_Control;
677 }
678 }
679 cache_entry.Tokens := 1;
680 }
681
// Persistent read service as owner: ship the data block plus every token
// except one to the current starver, keeping a single token so this L2
// retains read permission.  Requires holding strictly more than the owner
// share (max_tokens()/2 + 1).
// Fix: desc string read "Send data and out tokens but one" — typo for
// "all our tokens but one" (matches sibling action f's desc).
682 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
683 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
684 assert(is_valid(cache_entry));
685 assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
686 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
687 out_msg.Address := address;
688 out_msg.Type := CoherenceResponseType:DATA_OWNER;
689 out_msg.Sender := machineID;
690 out_msg.Destination.add(persistentTable.findSmallest(address));
691 out_msg.Tokens := cache_entry.Tokens - 1;
692 out_msg.DataBlk := cache_entry.DataBlk;
693 out_msg.Dirty := cache_entry.Dirty;
694 out_msg.MessageSize := MessageSizeType:Response_Data;
695 }
// Keep exactly one token so the block stays readable here.
696 cache_entry.Tokens := 1;
697 }
698
// Persistent request service when we hold exactly the owner share
// (max_tokens()/2 + 1): transfer data and ALL tokens to the starver,
// leaving none here.
// Fix: desc string was copy-pasted from ff_sendDataWithAllButOneTokens
// ("...but one to starver") but this action sends every token and zeroes
// the local count — corrected to describe the actual behavior.
699 action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
700 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
701 assert(is_valid(cache_entry));
702 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
703 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
704 out_msg.Address := address;
705 out_msg.Type := CoherenceResponseType:DATA_OWNER;
706 out_msg.Sender := machineID;
707 out_msg.Destination.add(persistentTable.findSmallest(address));
708 out_msg.Tokens := cache_entry.Tokens;
709 out_msg.DataBlk := cache_entry.DataBlk;
710 out_msg.Dirty := cache_entry.Dirty;
711 out_msg.MessageSize := MessageSizeType:Response_Data;
712 }
713 cache_entry.Tokens := 0;
714 }
715
716
717
// Redirect an in-flight response, unchanged, to the highest-priority
// starver for this address.
718 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
719 // assert(persistentTable.isLocked(address));
720 peek(responseNetwork_in, ResponseMsg) {
721 // FIXME, should use a 3rd vnet in some cases
722 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
723 out_msg.Address := address;
724 out_msg.Type := in_msg.Type;
725 out_msg.Sender := machineID;
726 out_msg.Destination.add(persistentTable.findSmallest(address));
727 out_msg.Tokens := in_msg.Tokens;
728 out_msg.DataBlk := in_msg.DataBlk;
729 out_msg.Dirty := in_msg.Dirty;
730 out_msg.MessageSize := in_msg.MessageSize;
731 }
732 }
733 }
734 
// Redirect an L1 shared/token writeback to the starver, translating the
// writeback type into the equivalent response type (data -> DATA_SHARED,
// tokens-only -> ACK).
735 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
736 //assert(persistentTable.isLocked(address));
737 peek(responseNetwork_in, ResponseMsg) {
738 // FIXME, should use a 3rd vnet in some cases
739 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
740 out_msg.Address := address;
741 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
742 out_msg.Type := CoherenceResponseType:DATA_SHARED;
743 } else {
744 assert(in_msg.Tokens < (max_tokens() / 2));
745 out_msg.Type := CoherenceResponseType:ACK;
746 }
747 out_msg.Sender := machineID;
748 out_msg.Destination.add(persistentTable.findSmallest(address));
749 out_msg.Tokens := in_msg.Tokens;
750 out_msg.DataBlk := in_msg.DataBlk;
751 out_msg.Dirty := in_msg.Dirty;
752 out_msg.MessageSize := in_msg.MessageSize;
753 }
754 }
755 }
756 
// Redirect an L1 owned writeback to the starver as a DATA_OWNER response
// (ownership transfers with it).
757 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
758 // assert(persistentTable.isLocked(address));
759 peek(responseNetwork_in, ResponseMsg) {
760 // FIXME, should use a 3rd vnet in some cases
761 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
762 out_msg.Address := address;
763 out_msg.Type := CoherenceResponseType:DATA_OWNER;
764 out_msg.Sender := machineID;
765 out_msg.Destination.add(persistentTable.findSmallest(address));
766 out_msg.Tokens := in_msg.Tokens;
767 out_msg.DataBlk := in_msg.DataBlk;
768 out_msg.Dirty := in_msg.Dirty;
769 out_msg.MessageSize := in_msg.MessageSize;
770 }
771 }
772 }
773
774
// An L1 writeback/hint means that L1 no longer shares the block: drop it
// from the local directory filter.
775 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
776 peek(responseNetwork_in, ResponseMsg) {
777 removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
778 }
779 }
780 
// Forward an external transient request to local L1s.  With filtering on,
// a first-try request (RetryNum == 0) for a block with no recorded local
// sharers is suppressed entirely; otherwise broadcast to all local L1s
// except the requestor.
781 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
782 peek(requestNetwork_in, RequestMsg) {
783 if (filtering_enabled == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
784 //profile_filter_action(1);
785 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
786 in_msg.RetryNum);
787 }
788 else {
789 enqueue(localRequestNetwork_out, RequestMsg, latency=l2_response_latency ) {
790 out_msg.Address := in_msg.Address;
791 out_msg.Requestor := in_msg.Requestor;
792 
793 //
794 // Currently assuming only one chip so all L1s are local
795 //
796 //out_msg.Destination := getLocalL1IDs(machineID);
797 out_msg.Destination.broadcast(MachineType:L1Cache);
798 out_msg.Destination.remove(in_msg.Requestor);
799 
800 out_msg.Type := in_msg.Type;
801 out_msg.isLocal := false;
802 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
803 out_msg.AccessMode := in_msg.AccessMode;
804 out_msg.Prefetch := in_msg.Prefetch;
805 }
806 //profile_filter_action(0);
807 }
808 }
809 }
810
// L2 hit for a local L1 GETS: send clean shared data with exactly one
// token.
811 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
812 peek(L1requestNetwork_in, RequestMsg) {
813 assert(is_valid(cache_entry));
814 assert(cache_entry.Tokens > 0);
815 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
816 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
817 out_msg.Address := address;
818 out_msg.Type := CoherenceResponseType:DATA_SHARED;
819 out_msg.Sender := machineID;
820 out_msg.Destination.add(in_msg.Requestor);
821 out_msg.DataBlk := cache_entry.DataBlk;
822 out_msg.Dirty := false;
823 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
824 out_msg.Tokens := 1;
825 }
826 cache_entry.Tokens := cache_entry.Tokens - 1;
827 }
828 }
829 
// L2 hit for a local L1 GETS when we hold exactly the owner share
// (max_tokens()/2 + 1): pass ownership (data + all tokens) to the L1.
830 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
831 peek(L1requestNetwork_in, RequestMsg) {
832 assert(is_valid(cache_entry));
833 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
834 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
835 out_msg.Address := address;
836 out_msg.Type := CoherenceResponseType:DATA_OWNER;
837 out_msg.Sender := machineID;
838 out_msg.Destination.add(in_msg.Requestor);
839 out_msg.DataBlk := cache_entry.DataBlk;
840 out_msg.Dirty := cache_entry.Dirty;
841 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
842 out_msg.Tokens := cache_entry.Tokens;
843 }
844 cache_entry.Tokens := 0;
845 }
846 }
847 
// L2 hit for a local L1 GETX: hand over data and every token we hold.
848 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
849 peek(L1requestNetwork_in, RequestMsg) {
850 assert(is_valid(cache_entry));
851 // assert(cache_entry.Tokens == max_tokens());
852 //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
853 enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
854 out_msg.Address := address;
855 out_msg.Type := CoherenceResponseType:DATA_OWNER;
856 out_msg.Sender := machineID;
857 out_msg.Destination.add(in_msg.Requestor);
858 out_msg.DataBlk := cache_entry.DataBlk;
859 out_msg.Dirty := cache_entry.Dirty;
860 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
861 //out_msg.Tokens := max_tokens();
862 out_msg.Tokens := cache_entry.Tokens;
863 }
864 cache_entry.Tokens := 0;
865 }
866 }
867
  // Consume the head message of the persistent (starvation-avoidance)
  // request queue.
  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }
871
  // Consume the head message of the (transient) request queue.
  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }
875
  // Consume the head message of the response queue.
  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }
879
  // Consume the head message of the local-L1 request queue.
  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }
883
884
  // Accumulate the tokens carried by the incoming response into this
  // entry's token count.  Also latches the Dirty bit for owner-carrying
  // responses, because a Writeback_All_Tokens transition may fire this
  // action without also firing u_writeDataToCache.
  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      // A response that moves no tokens should never reach this action.
      assert(in_msg.Tokens != 0);
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }
898
  // Record the requesting local L1 in the sharing filter: a GETX makes
  // it the new (exclusive) writer, a GETS adds it as a sharer.  Only
  // L1Cache requestors are tracked; requests from other machine types
  // are ignored.
  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }
910
  // Drop any exclusive-writer marking for this address in the filter
  // (no-op if none exists); run whenever an external request is seen.
  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }
914
  // Touch the replacement state (MRU) for this line, but only when the
  // request really came from a local L1 and the entry is present --
  // other requestors should not affect L2 replacement decisions.
  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cacheMemory.setMRU(address);
      }
    }
  }
923
  // Hand every token we hold to the external (transient) requestor as a
  // data-less ACK.  If we hold no tokens, no message is sent; either
  // way the local count ends at zero.
  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    // Harmless when already zero (the no-token case above).
    cache_entry.Tokens := 0;
  }
941
  // Same as t_sendAckWithCollectedTokens, but the destination comes from
  // the LOCAL L1 request queue: pass all held tokens to the requesting
  // L1 as a data-less ACK (no message if we hold none).
  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency=l2_response_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    // Harmless when already zero (the no-token case above).
    cache_entry.Tokens := 0;
  }
959
  // Copy the data block from the incoming response into the cache entry.
  // The Dirty bit is sticky: it is only ever raised here, never cleared,
  // so a clean response cannot hide an earlier dirty write.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if ((cache_entry.Dirty == false) && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }
969
  // Allocate a fresh L2 entry for this address and make it the current
  // cache_entry for subsequent actions in the same transition.
  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }
973
  // Free the L2 entry and clear the transition's cache_entry handle so
  // a replacement can proceed in parallel with a fetch.
  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }
978
  // Record this demand miss in the L2 statistics, tagged with the
  // request type, access mode, and prefetch bit from the L1 request.
  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1requestNetwork_in, RequestMsg) {
      L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
                                          in_msg.AccessMode,
                                          in_msg.Prefetch);
    }
  }
986
987
  // Sanity check: any data-carrying response (i.e. anything other than a
  // bare ACK or token writeback) must carry exactly the bytes we already
  // have cached -- token coherence never lets divergent copies coexist.
  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }
997
998
  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // An L1 invalidation hint: just update the sharing filter; no tokens
  // or data move at the L2.
  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {

    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // A persistent lock/unlock owned by this node needs no work in the
  // stable states.
  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP (block not present at this L2)

  // We cannot supply anything, but a local L1 might: relay the external
  // transient request inward and drop any exclusive-writer marking.
  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  // Local L1 miss with nothing here: broadcast on the L1's behalf and
  // record it in the sharing filter.
  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Responses arriving while not present cannot be absorbed; bounce
  // them onward.
  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  // L1 writebacks allocate a fresh L2 entry; the resulting state follows
  // what came back: shared data -> S, bare tokens (no data) -> I,
  // data with every token -> M, owned data -> O.
  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  // A persistent request from another node locks the (absent) block.
  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }
1073
  // Transitions from Idle (entry allocated, possibly holding tokens but
  // no valid data)

  // Local L1 miss: broadcast for it, and pass along any tokens we have
  // already collected.
  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Replacement from I: release held tokens (clean path), free the entry.
  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  // External transient request: give up our collected tokens and also
  // let local sharers respond.
  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Persistent request: surrender tokens to the starver, enter locked I.
  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Token/data responses accumulate here; the next state follows what
  // arrived: shared data -> S, owner data -> O, all tokens -> M.
  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Bare tokens (no data): stay in I, just count them.
  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }
1162
  // Transitions from Shared (valid data, at least one token, no owner
  // token)

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  // External GETX takes all our tokens (data-less ACK; the owner
  // elsewhere supplies data); local sharers are also notified.
  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // External GETS: we keep our tokens (not the owner); just forward
  // inward so local sharers may respond.
  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Persistent GETX: give everything up and lock.
  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Persistent GETS: keep one token (stay a locked sharer), send the rest.
  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Data-carrying responses in S must match our copy (sanity check).
  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  // Gaining the owner token upgrades S -> O; gaining every token -> M.
  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local L1 GETX: we lack the owner token, so broadcast for the rest
  // and forward what we have.
  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  // Local L1 GETS hits: supply shared data with one token; if it was
  // our last token we fall back to I.
  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {

    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }
1270
  // Transitions from Owned (valid data plus the owner token; other
  // caches may hold additional tokens)

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // External GETX: the owner must supply data and all tokens.
  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  // Persistent GETS: keep one token and the data (locked sharer),
  // unless it is the last token, in which case everything goes.
  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  // Cannot satisfy a GETS with only the owner token left via the
  // transient path; the requestor will retry persistently.
  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Collecting the final outstanding tokens upgrades O -> M.
  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local L1 GETS: shared data with one token; on the last token the
  // requestor receives ownership instead (\k) and we drop to I.
  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Local L1 GETX: hand over data plus all our tokens, and broadcast to
  // collect the remainder held elsewhere.
  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }
1372
  // Transitions from M (valid data plus every token)

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  // Local L1 GETS: supply shared data with a token; we retain ownership,
  // so M degrades to O.
  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Local L1 GETX: hand over data and every token; no broadcast needed
  // since we already held them all.
  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }
1406
1407
  // Transitions from locked states (a persistent request from another
  // node holds the block; incoming tokens are redirected to the starver)

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  // L1 writebacks while locked are also redirected (shared vs. owned
  // variants), but the sharing filter is still updated locally.
  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  // The lock owner released: unlock back to the corresponding stable state.
  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  // Transient requests are ignored while locked (the persistent request
  // has priority); only the exclusive marking is cleared.
  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  // Local L1 requests while locked: broadcast on their behalf; an S_L
  // GETS can still be served from our retained token/data.
  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // A persistent GETX arriving while we are a locked sharer takes our
  // remaining token(s).
  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  // Further persistent GETS while already locked: nothing more to give.
  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
1496 }