// gem5: src/mem/protocol/MOESI_CMP_token-L2cache.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L2Cache, "Token protocol") {

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false";  // this L2 bank -> a local L1
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="1", ordered="false";  // this L2 bank -> mod-directory
  MessageBuffer responseFromL2Cache, network="To", virtual_network="2", ordered="false";  // this L2 bank -> a local L1 || mod-directory

  // FROM the network to this local bank of L2 cache
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false";  // a local L1 -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="1", ordered="false";  // mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="2", ordered="false";  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true";
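  // Note: persistent (starvation-avoidance) messages travel on their own
  // ordered virtual network. Token coherence relies on every node observing
  // persistent-request activations and deactivations in the same order, so
  // that the per-node persistent tables stay consistent with each other.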

  // STATES
  enumeration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, desc="Not Present";
    I, desc="Idle";
    S, desc="Shared, not present in any local L1s";
    O, desc="Owned, not present in any L1s";
    M, desc="Modified, not present in any L1s";

    // Locked states
    I_L, "I^L", desc="Invalid, Locked";
    S_L, "S^L", desc="Shared, Locked";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request for the last token";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor when only one token remains";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 carrying all the tokens";
    Writeback_Owned, desc="Received a writeback from L1 carrying the owner token";

    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }
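  // The DirEntry records which local L1s are (likely) caching the line. It is
  // maintained as a hint, not an exact directory: entries are updated on local
  // requests and writebacks, and are used below to filter external transient
  // requests away from L1s that cannot have the block.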

  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
    void setMRU(Address);
  }

  external_type(PerfectCacheMemory) {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)+"_L2"';

  PersistentTable persistentTable, constructor_hack="i";
  PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";

  bool getFilteringEnabled();

  Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr];
    }
  }

  int getTokens(Address addr) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr].Tokens;
    } else {
      return 0;
    }
  }

  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    }
  }

  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    if (isCacheTagPresent(addr)) {
      return getL2CacheEntry(addr).CacheState;
    } else if (persistentTable.isLocked(addr) == true) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  string getStateStr(Address addr) {
    return L2Cache_State_to_string(getState(addr));
  }

  void setState(Address addr, State state) {

    if (isCacheTagPresent(addr)) {
      // Make sure the token count is in range
      assert(getL2CacheEntry(addr).Tokens >= 0);
      assert(getL2CacheEntry(addr).Tokens <= max_tokens());

      // Make sure we hold no tokens in the locked-invalid (I_L) state
      if (state == State:I_L) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M) {
        assert(getL2CacheEntry(addr).Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(getL2CacheEntry(addr).Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S) {
        assert(getL2CacheEntry(addr).Tokens > 0);
      }

      // You have at least one token in O-like states (ideally at least half,
      // but that is not guaranteed here)
      if (state == State:O) {
        assert(getL2CacheEntry(addr).Tokens >= 1); // Must have at least one token
        // assert(getL2CacheEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
      }

      getL2CacheEntry(addr).CacheState := state;

      // Set permission
      if (state == State:I) {
        changePermission(addr, AccessPermission:Invalid);
      } else if (state == State:S || state == State:O) {
        changePermission(addr, AccessPermission:Read_Only);
      } else if (state == State:M) {
        changePermission(addr, AccessPermission:Read_Write);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }

  void removeSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      return (localDirectory[addr].Sharers.count() > 0);
    } else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      return (localDirectory[addr].exclusive == true);
    } else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr) == true) {
      localDirectory[addr].exclusive := false;
    }
  }
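  // The out_port declarations below bind a name to each outgoing buffer; the
  // in_port blocks peek at the head message of each incoming buffer and map
  // it to one of the Events declared above. The triggered transition's
  // actions then consume the message explicitly (see the *pop*Queue actions).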

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);

  // ** IN_PORTS **

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Address)) {
          if (persistentTable.typeOfSmallest(in_msg.Address) == AccessType:Read) {
            trigger(Event:Persistent_GETS, in_msg.Address);
          } else {
            trigger(Event:Persistent_GETX, in_msg.Address);
          }
        } else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
        }
      }
    }
  }
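  // The persistent table holds all currently active persistent requests for
  // each address and arbitrates among them (by smallest node ID, per the
  // findSmallest()/typeOfSmallest() naming), so every node independently
  // agrees on which starving processor currently holds the lock.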

  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Address);
          } else {
            trigger(Event:Transient_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (L2cacheMemory.isTagPresent(in_msg.Address) && getL2CacheEntry(in_msg.Address).Tokens == 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Address);
          } else {
            trigger(Event:L1_GETS, in_msg.Address);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getTokens(in_msg.Address) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Address);
              }
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS || in_msg.Type == CoherenceResponseType:WB_OWNED || in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cacheMemory.cacheAvail(in_msg.Address) || L2cacheMemory.isTagPresent(in_msg.Address)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert((getState(in_msg.Address) != State:NP) && (getState(in_msg.Address) != State:I));
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              } else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Address);
              }
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Address);
          } else {
            DEBUG_EXPR(in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }
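  // The response port classifies each message twice: first by whether the
  // incoming tokens would complete the full token set for the line
  // (getTokens(addr) + in_msg.Tokens == max_tokens()), then by message type.
  // Completing the set raises the *_All_Tokens event variants, which drive
  // the line toward M in the transition table below.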

  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally
      // if (in_msg.RetryNum > 0 || (in_msg.Type == CoherenceRequestType:GETX && exclusiveExists(in_msg.Address) == false) || (in_msg.Type == CoherenceRequestType:GETS && sharersExist(in_msg.Address) == false)) {
      enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
        out_msg.Address := in_msg.Address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := in_msg.RequestorMachine;
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        out_msg.RetryNum := in_msg.RetryNum;
        out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } // enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } // action

  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L2Cache;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;

      if (getL2CacheEntry(address).Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (getL2CacheEntry(address).Tokens > N_tokens()) {
        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens();
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - N_tokens();
      } else {
        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L2Cache;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getL2CacheEntry(address).Tokens >= 1);
      out_msg.Tokens := getL2CacheEntry(address).Tokens;
      out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
      out_msg.Dirty := getL2CacheEntry(address).Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    getL2CacheEntry(address).Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(getL2CacheEntry(address).Tokens > 0);
    if (getL2CacheEntry(address).Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getL2CacheEntry(address).Tokens >= 1);
        out_msg.Tokens := getL2CacheEntry(address).Tokens - 1;
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
      getL2CacheEntry(address).Tokens := 1;
    }
  }

  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect shared writeback to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect owned writeback to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Address, machineIDToNodeID(in_msg.Sender));
    }
  }

  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (getFilteringEnabled() == true && in_msg.RetryNum == 0 && sharersExist(in_msg.Address) == false) {
        profile_filter_action(1);
        DEBUG_EXPR("filtered message");
        DEBUG_EXPR(in_msg.RetryNum);
      } else {
        enqueue(localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY") {
          out_msg.Address := in_msg.Address;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := in_msg.RequestorMachine;
          out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        profile_filter_action(0);
      }
    }
  }
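  // Filtering rationale: if the local directory hints that no local L1 holds
  // the block, a first-attempt (RetryNum == 0) external request can be dropped
  // here instead of being forwarded to every local L1. Retries are always
  // forwarded, so a stale hint costs extra latency but not correctness.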

  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(getL2CacheEntry(address).Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens - 1;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      // assert(getL2CacheEntry(address).Tokens == max_tokens());
      //enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_to_L1_RESPONSE_LATENCY") {
      enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L2Cache;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
        out_msg.Dirty := getL2CacheEntry(address).Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := getL2CacheEntry(address).Tokens;
      }
      getL2CacheEntry(address).Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue.") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }

  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens != 0);
      getL2CacheEntry(address).Tokens := getL2CacheEntry(address).Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ((in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := true;
      }
    }
  }

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (in_msg.Type == CoherenceRequestType:GETX) {
        setNewWriter(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
      } else if (in_msg.Type == CoherenceRequestType:GETS) {
        addNewSharer(in_msg.Address, machineIDToNodeID(in_msg.Requestor));
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line") {
    if (isCacheTagPresent(address)) {
      L2cacheMemory.setMRU(address);
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    if (getL2CacheEntry(address).Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L2Cache;
          out_msg.Destination.add(in_msg.Requestor);
          assert(getL2CacheEntry(address).Tokens >= 1);
          out_msg.Tokens := getL2CacheEntry(address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    getL2CacheEntry(address).Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
      if ((getL2CacheEntry(address).Dirty == false) && in_msg.Dirty) {
        getL2CacheEntry(address).Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1requestNetwork_in, RequestMsg) {
      // AccessModeType not implemented
      //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
    }
  }

  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getL2CacheEntry(address).DataBlk == in_msg.DataBlk);
    }
  }

  action(z_stall, "z", desc="Stall") {
  }
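  // Each transition below names the current state(s), the triggering event,
  // an optional next state, and the ordered list of actions (declared above
  // with their single-letter shorthands) that execute atomically when the
  // transition fires.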

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }

  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, {Persistent_GETX, Persistent_GETS}, I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;  // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;  // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement;  // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I, {Persistent_GETX, Persistent_GETS}, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S, Persistent_GETS, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // hold on to our last token; wait for the requestor to go persistent
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }
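  // While a line is locked by another node's persistent request (I_L/S_L),
  // any tokens or data this L2 receives are not kept; they are bounced
  // straight to the starving requestor recorded in the persistent table.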

  // Transitions from locked states

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, Persistent_GETS) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}