mem-ruby: Use check_on_cache_probe on MOESI hammer
[gem5.git] / src / mem / protocol / MOESI_CMP_token-L2cache.sm
1 /*
2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
// L2 bank controller for the MOESI CMP token-coherence protocol.
// Parameters: L2cache is this bank's tag/data store; N_tokens bounds how many
// tokens a single GETS response hands out; latencies are in cycles;
// filtering_enabled gates forwarding of external transient requests to local L1s.
29 machine(MachineType:L2Cache, "Token protocol")
30 : CacheMemory * L2cache;
31 int N_tokens;
32 Cycles l2_request_latency := 5;
33 Cycles l2_response_latency := 5;
34 bool filtering_enabled := "True";
35
36 // L2 BANK QUEUES
37 // From local bank of L2 cache TO the network
38
39 // this L2 bank -> a local L1 || mod-directory
40 MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
41 vnet_type="response";
42 // this L2 bank -> mod-directory
43 MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
44 vnet_type="request";
45 // this L2 bank -> a local L1
46 MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
47 vnet_type="request";
48
49
50 // FROM the network to this local bank of L2 cache
51
52 // a local L1 || mod-directory -> this L2 bank
53 MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
54 vnet_type="response";
55 MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
56 vnet_type="persistent";
57 // mod-directory -> this L2 bank
58 MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
59 vnet_type="request";
60 // a local L1 -> this L2 bank
61 MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
62 vnet_type="request";
63
64 {
65 // STATES
// Stable states for an L2 block.  The locked states (I_L, S_L) are entered
// while another node holds a persistent (starvation-avoidance) request on
// the address; see the persistentNetwork_in port below.
66 state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
67 // Base states
68 NP, AccessPermission:Invalid, desc="Not Present";
69 I, AccessPermission:Invalid, desc="Idle";
70 S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
71 O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
72 M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
73
74 // Locked states
75 I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
76 S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
77 }
78
79 // EVENTS
// Events driving the L2 transition table.  "_Last_Token"/"_All_Tokens"
// variants are distinguished in the in_ports by comparing the current token
// count (plus incoming tokens) against 1, half, or max_tokens().
80 enumeration(Event, desc="Cache events") {
81
82 // Requests
83 L1_GETS, desc="local L1 GETS request";
84 L1_GETS_Last_Token, desc="local L1 GETS request";
85 L1_GETX, desc="local L1 GETX request";
86 L1_INV, desc="L1 no longer has tokens";
87 Transient_GETX, desc="A GetX from another processor";
88 Transient_GETS, desc="A GetS from another processor";
89 Transient_GETS_Last_Token, desc="A GetS from another processor";
90
91 // events initiated by this L2
92 L2_Replacement, desc="L2 Replacement", format="!r";
93
94 // events of external L2 responses
95
96 // Responses
97 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
98 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
99 Writeback_All_Tokens, desc="Received a writeback from L1";
100 Writeback_Owned, desc="Received a writeback from L1";
101
102
103 Data_Shared, desc="Received a data message, we are now a sharer";
104 Data_Owner, desc="Received a data message, we are now the owner";
105 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
106 Ack, desc="Received an ack message";
107 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
108
109 // Lock/Unlock
110 Persistent_GETX, desc="Another processor has priority to read/write";
111 Persistent_GETS, desc="Another processor has priority to read";
112 Persistent_GETS_Last_Token, desc="Another processor has priority to read";
113 Own_Lock_or_Unlock, desc="This processor now has priority";
114 }
115
116 // TYPES
117
118 // CacheEntry
// Per-block cache entry: coherence state, dirty bit, the number of tokens
// this bank currently holds for the line, and the data itself.
119 structure(Entry, desc="...", interface="AbstractCacheEntry") {
120 State CacheState, desc="cache state";
121 bool Dirty, desc="Is the data dirty (different than memory)?";
122 int Tokens, desc="The number of tokens we're holding for the line";
123 DataBlk DataBlk, desc="data for the block";
124 }
125
// Soft-state filter entry tracking which local L1s likely hold the block;
// used only to avoid forwarding transient requests to uninterested L1s.
126 structure(DirEntry, desc="...", interface="AbstractEntry") {
127 Set Sharers, desc="Set of the internal processors that want the block in shared state";
128 bool exclusive, default="false", desc="if local exclusive is likely";
129 }
130
// External (C++) helper: unbounded tag store backing the local directory.
131 structure(PerfectCacheMemory, external="yes") {
132 void allocate(Addr);
133 void deallocate(Addr);
134 DirEntry lookup(Addr);
135 bool isTagPresent(Addr);
136 }
137
// External (C++) helper: table of outstanding persistent requests, ordered
// so findSmallest() yields the current starver with highest priority.
138 structure(PersistentTable, external="yes") {
139 void persistentRequestLock(Addr, MachineID, AccessType);
140 void persistentRequestUnlock(Addr, MachineID);
141 MachineID findSmallest(Addr);
142 AccessType typeOfSmallest(Addr);
143 void markEntries(Addr);
144 bool isLocked(Addr);
145 int countStarvingForAddress(Addr);
146 int countReadStarvingForAddress(Addr);
147 }
148
// Controller-local state and externally-implemented helper prototypes.
149 PersistentTable persistentTable;
150 PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
151
152 Tick clockEdge();
153 void set_cache_entry(AbstractCacheEntry b);
154 void unset_cache_entry();
155 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
156
// Look up this bank's entry for the address; null pointer when not present.
157 Entry getCacheEntry(Addr address), return_by_pointer="yes" {
158 Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
159 return cache_entry;
160 }
161
// Look up the local-directory filter entry (caller must ensure tag presence).
162 DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
163 return localDirectory.lookup(address);
164 }
165
// Functional (debug/backdoor) read of the block's data.
// NOTE(review): dereferences getCacheEntry(addr) without an is_valid check —
// presumably callers only issue functional reads for resident blocks; confirm.
166 void functionalRead(Addr addr, Packet *pkt) {
167 testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
168 }
169
// Functional write; returns the number of bytes successfully written.
170 int functionalWrite(Addr addr, Packet *pkt) {
171 int num_functional_writes := 0;
172 num_functional_writes := num_functional_writes +
173 testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
174 return num_functional_writes;
175 }
176
// Token count held for a block; an absent entry holds zero tokens.
177 int getTokens(Entry cache_entry) {
178 if (is_valid(cache_entry)) {
179 return cache_entry.Tokens;
180 } else {
181 return 0;
182 }
183 }
184
// State query: a non-resident block reports I_L while a persistent request
// is active on the address, otherwise NP.
185 State getState(Entry cache_entry, Addr addr) {
186 if (is_valid(cache_entry)) {
187 return cache_entry.CacheState;
188 } else if (persistentTable.isLocked(addr)) {
189 return State:I_L;
190 } else {
191 return State:NP;
192 }
193 }
194
// Set the block's state, asserting the token-count invariants of token
// coherence: exactly half the tokens is never a legal holding; locked and
// NP states hold none; M holds all; S holds at least one; O holds a majority
// (the owner token is modeled as max_tokens()/2 + 1 worth of tokens).
195 void setState(Entry cache_entry, Addr addr, State state) {
196
197 if (is_valid(cache_entry)) {
198 // Make sure the token count is in range
199 assert(cache_entry.Tokens >= 0);
200 assert(cache_entry.Tokens <= max_tokens());
201 assert(cache_entry.Tokens != (max_tokens() / 2));
202
203 // Make sure we have no tokens in L
204 if ((state == State:I_L) ) {
205 assert(cache_entry.Tokens == 0);
206 }
207
208 // in M and E you have all the tokens
209 if (state == State:M ) {
210 assert(cache_entry.Tokens == max_tokens());
211 }
212
213 // in NP you have no tokens
214 if (state == State:NP) {
215 assert(cache_entry.Tokens == 0);
216 }
217
218 // You have at least one token in S-like states
219 if (state == State:S ) {
220 assert(cache_entry.Tokens > 0);
221 }
222
223 // You have at least half the token in O-like states
224 if (state == State:O ) {
225 assert(cache_entry.Tokens > (max_tokens() / 2));
226 }
227
228 cache_entry.CacheState := state;
229 }
230 }
231
// Map the block's current state to a Ruby access permission; blocks with no
// entry are NotPresent.
232 AccessPermission getAccessPermission(Addr addr) {
233 Entry cache_entry := getCacheEntry(addr);
234 if(is_valid(cache_entry)) {
235 return L2Cache_State_to_permission(cache_entry.CacheState);
236 }
237
238 return AccessPermission:NotPresent;
239 }
240
// Keep the entry's permission bits in sync with a state change.
241 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
242 if (is_valid(cache_entry)) {
243 cache_entry.changePermission(L2Cache_State_to_permission(state));
244 }
245 }
246
// Drop one L1 from the block's sharer filter; deallocate the filter entry
// once the last sharer is removed.
247 void removeSharer(Addr addr, NodeID id) {
248
249 if (localDirectory.isTagPresent(addr)) {
250 DirEntry dir_entry := getDirEntry(addr);
251 dir_entry.Sharers.remove(id);
252 if (dir_entry.Sharers.count() == 0) {
253 localDirectory.deallocate(addr);
254 }
255 }
256 }
257
258 bool sharersExist(Addr addr) {
259 if (localDirectory.isTagPresent(addr)) {
260 DirEntry dir_entry := getDirEntry(addr);
261 if (dir_entry.Sharers.count() > 0) {
262 return true;
263 }
264 else {
265 return false;
266 }
267 }
268 else {
269 return false;
270 }
271 }
272
273 bool exclusiveExists(Addr addr) {
274 if (localDirectory.isTagPresent(addr)) {
275 DirEntry dir_entry := getDirEntry(addr);
276 if (dir_entry.exclusive) {
277 return true;
278 }
279 else {
280 return false;
281 }
282 }
283 else {
284 return false;
285 }
286 }
287
288 // assumes that caller will check to make sure tag is present
289 Set getSharers(Addr addr) {
290 DirEntry dir_entry := getDirEntry(addr);
291 return dir_entry.Sharers;
292 }
293
// Record a GETX requestor: it becomes the sole sharer and the likely
// exclusive holder (allocating the filter entry on demand).
294 void setNewWriter(Addr addr, NodeID id) {
295 if (localDirectory.isTagPresent(addr) == false) {
296 localDirectory.allocate(addr);
297 }
298 DirEntry dir_entry := getDirEntry(addr);
299 dir_entry.Sharers.clear();
300 dir_entry.Sharers.add(id);
301 dir_entry.exclusive := true;
302 }
303
// Record a GETS requestor as an additional sharer; the exclusive hint is
// deliberately left untouched (see commented-out line).
304 void addNewSharer(Addr addr, NodeID id) {
305 if (localDirectory.isTagPresent(addr) == false) {
306 localDirectory.allocate(addr);
307 }
308 DirEntry dir_entry := getDirEntry(addr);
309 dir_entry.Sharers.add(id);
310 // dir_entry.exclusive := false;
311 }
312
// Clear the exclusive hint if the block is tracked; no-op otherwise.
313 void clearExclusiveBitIfExists(Addr addr) {
314 if (localDirectory.isTagPresent(addr)) {
315 DirEntry dir_entry := getDirEntry(addr);
316 dir_entry.exclusive := false;
317 }
318 }
319
320 // ** OUT_PORTS **
// Outbound ports bound to the "To" message buffers declared above.
321 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
322 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
323 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
324
325
326
327 // ** IN_PORTS **
328
329 // Persistent Network
// Persistent-request network: first update the persistent table from the
// message, then trigger based on whether the address is now locked and by
// which access type.  The "== 1 || == max/2 + 1" test detects that we hold
// either the literal last token or the owner token plus nothing spare.
330 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
331 if (persistentNetwork_in.isReady(clockEdge())) {
332 peek(persistentNetwork_in, PersistentMsg) {
333 assert(in_msg.Destination.isElement(machineID));
334
335 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
336 persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
337 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
338 persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
339 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
340 persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
341 } else {
342 error("Unexpected message");
343 }
344
345 Entry cache_entry := getCacheEntry(in_msg.addr);
346 // React to the message based on the current state of the table
347 if (persistentTable.isLocked(in_msg.addr)) {
348
349 if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
350 if (getTokens(cache_entry) == 1 ||
351 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
352 trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
353 cache_entry);
354 } else {
355 trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
356 }
357 } else {
358 trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
359 }
360 }
361 else {
362 trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
363 }
364 }
365 }
366 }
367
368
369 // Request Network
// Global (inter-chip) request network: classify external GETX/GETS, with a
// special event when we hold exactly one token and a GETS would take it.
370 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
371 if (requestNetwork_in.isReady(clockEdge())) {
372 peek(requestNetwork_in, RequestMsg) {
373 assert(in_msg.Destination.isElement(machineID));
374
375 Entry cache_entry := getCacheEntry(in_msg.addr);
376 if (in_msg.Type == CoherenceRequestType:GETX) {
377 trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
378 } else if (in_msg.Type == CoherenceRequestType:GETS) {
379 if (getTokens(cache_entry) == 1) {
380 trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
381 cache_entry);
382 }
383 else {
384 trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
385 }
386 } else {
387 error("Unexpected message");
388 }
389 }
390 }
391 }
392
// Local L1 request network: like the global port, but the last-token test
// also covers holding only the owner token (max/2 + 1).
393 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
394 if (L1requestNetwork_in.isReady(clockEdge())) {
395 peek(L1requestNetwork_in, RequestMsg) {
396 assert(in_msg.Destination.isElement(machineID));
397 Entry cache_entry := getCacheEntry(in_msg.addr);
398 if (in_msg.Type == CoherenceRequestType:GETX) {
399 trigger(Event:L1_GETX, in_msg.addr, cache_entry);
400 } else if (in_msg.Type == CoherenceRequestType:GETS) {
401 if (getTokens(cache_entry) == 1 ||
402 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
403 trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
404 }
405 else {
406 trigger(Event:L1_GETS, in_msg.addr, cache_entry);
407 }
408 } else {
409 error("Unexpected message");
410 }
411 }
412 }
413 }
414
415
416 // Response Network
// Response network.  Outer branch: will this message complete the full token
// set (current + incoming == max_tokens())?  If not, trigger the plain
// Ack/Data/Writeback events; if so, trigger the *_All_Tokens variants.
// Writebacks additionally require either a free way or an existing entry;
// otherwise an L2_Replacement is triggered on the victim chosen by
// cacheProbe(), stalling this message until space exists.
417 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
418 if (responseNetwork_in.isReady(clockEdge())) {
419 peek(responseNetwork_in, ResponseMsg) {
420 assert(in_msg.Destination.isElement(machineID));
421 Entry cache_entry := getCacheEntry(in_msg.addr);
422
423 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
424 if (in_msg.Type == CoherenceResponseType:ACK) {
425 assert(in_msg.Tokens < (max_tokens() / 2));
426 trigger(Event:Ack, in_msg.addr, cache_entry);
427 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
428 trigger(Event:Data_Owner, in_msg.addr, cache_entry);
429 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
430 trigger(Event:Data_Shared, in_msg.addr, cache_entry);
431 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
432 in_msg.Type == CoherenceResponseType:WB_OWNED ||
433 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
434
435 if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
436
437 // either room is available or the block is already present
438
439 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
440 assert(in_msg.Dirty == false);
441 trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
442 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
443 assert(in_msg.Dirty == false);
444 trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
445 }
446 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
447 //assert(in_msg.Dirty == false);
448 trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
449 }
450 }
451 else {
// No way free: evict the probe victim first; this writeback is retried later.
452 trigger(Event:L2_Replacement,
453 L2cache.cacheProbe(in_msg.addr),
454 getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
455 }
456 } else if (in_msg.Type == CoherenceResponseType:INV) {
457 trigger(Event:L1_INV, in_msg.addr, cache_entry);
458 } else {
459 error("Unexpected message");
460 }
461 } else {
// This message completes the full token set for the block.
462 if (in_msg.Type == CoherenceResponseType:ACK) {
463 assert(in_msg.Tokens < (max_tokens() / 2));
464 trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
465 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
466 in_msg.Type == CoherenceResponseType:DATA_SHARED) {
467 trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
468 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
469 in_msg.Type == CoherenceResponseType:WB_OWNED ||
470 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
471 if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {
472
473 // either room is available or the block is already present
474
475 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
476 assert(in_msg.Dirty == false);
477 assert( (getState(cache_entry, in_msg.addr) != State:NP)
478 && (getState(cache_entry, in_msg.addr) != State:I) );
479 trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
480 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
481 assert(in_msg.Dirty == false);
482 trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
483 }
484 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
485 trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
486 }
487 }
488 else {
489 trigger(Event:L2_Replacement,
490 L2cache.cacheProbe(in_msg.addr),
491 getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
492 }
493 } else if (in_msg.Type == CoherenceResponseType:INV) {
494 trigger(Event:L1_INV, in_msg.addr, cache_entry);
495 } else {
496 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
497 error("Unexpected message");
498 }
499 }
500 }
501 }
502 }
503
504
505 // ACTIONS
506
// Forward a local L1 request to the directory (statically-shared L2, so no
// peer L2 banks need to see it); the original broadcast destinations are
// kept as commented-out history.
507 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
508
509 peek(L1requestNetwork_in, RequestMsg) {
510
511 // if this is a retry or no local sharers, broadcast normally
512 enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
513 out_msg.addr := in_msg.addr;
514 out_msg.Type := in_msg.Type;
515 out_msg.Requestor := in_msg.Requestor;
516 out_msg.RetryNum := in_msg.RetryNum;
517
518 //
519 // If a statically shared L2 cache, then no other L2 caches can
520 // store the block
521 //
522 //out_msg.Destination.broadcast(MachineType:L2Cache);
523 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
524 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
525
526 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
527 out_msg.MessageSize := MessageSizeType:Request_Control;
528 out_msg.AccessMode := in_msg.AccessMode;
529 out_msg.Prefetch := in_msg.Prefetch;
530 } //enqueue
531 // } // if
532
533 //profile_filter_action(0);
534 } // peek
535 } //action
536
537
// Relay an incoming response (tokens and possibly data) to the directory
// unchanged — used when this bank cannot or will not keep the tokens.
538 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
539 peek(responseNetwork_in, ResponseMsg) {
540 // FIXME, should use a 3rd vnet
541 enqueue(responseNetwork_out, ResponseMsg, 1) {
542 out_msg.addr := address;
543 out_msg.Type := in_msg.Type;
544 out_msg.Sender := machineID;
545 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
546 out_msg.Tokens := in_msg.Tokens;
547 out_msg.MessageSize := in_msg.MessageSize;
548 out_msg.DataBlk := in_msg.DataBlk;
549 out_msg.Dirty := in_msg.Dirty;
550 }
551 }
552 }
553
// Evict a clean block: return any held tokens to the directory as an ACK
// (no data needed) and zero our token count.
554 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
555 assert(is_valid(cache_entry));
556 if (cache_entry.Tokens > 0) {
557 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
558 out_msg.addr := address;
559 out_msg.Type := CoherenceResponseType:ACK;
560 out_msg.Sender := machineID;
561 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
562 out_msg.Tokens := cache_entry.Tokens;
563 out_msg.MessageSize := MessageSizeType:Writeback_Control;
564 }
565 cache_entry.Tokens := 0;
566 }
567 }
568
// Evict an owned block: send tokens to the directory, with the data block
// only when dirty (DATA_OWNER); a clean owner sends ACK_OWNER control-only.
569 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
570 assert(is_valid(cache_entry));
571 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
572 out_msg.addr := address;
573 out_msg.Sender := machineID;
574 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
575 out_msg.Tokens := cache_entry.Tokens;
576 out_msg.DataBlk := cache_entry.DataBlk;
577 out_msg.Dirty := cache_entry.Dirty;
578
579 if (cache_entry.Dirty) {
580 out_msg.MessageSize := MessageSizeType:Writeback_Data;
581 out_msg.Type := CoherenceResponseType:DATA_OWNER;
582 } else {
583 out_msg.MessageSize := MessageSizeType:Writeback_Control;
584 out_msg.Type := CoherenceResponseType:ACK_OWNER;
585 }
586 }
587 cache_entry.Tokens := 0;
588 }
589
// Answer an external GETS with shared data: hand out N_tokens when we hold
// comfortably more than the owner majority, otherwise just one token.
590 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
591 peek(requestNetwork_in, RequestMsg) {
592 assert(is_valid(cache_entry));
593 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
594 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
595 out_msg.addr := address;
596 out_msg.Type := CoherenceResponseType:DATA_SHARED;
597 out_msg.Sender := machineID;
598 out_msg.Destination.add(in_msg.Requestor);
599 out_msg.Tokens := N_tokens;
600 out_msg.DataBlk := cache_entry.DataBlk;
601 out_msg.Dirty := false;
602 out_msg.MessageSize := MessageSizeType:Response_Data;
603 }
604 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
605 }
606 else {
607 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
608 out_msg.addr := address;
609 out_msg.Type := CoherenceResponseType:DATA_SHARED;
610 out_msg.Sender := machineID;
611 out_msg.Destination.add(in_msg.Requestor);
612 out_msg.Tokens := 1;
613 out_msg.DataBlk := cache_entry.DataBlk;
614 out_msg.Dirty := false;
615 out_msg.MessageSize := MessageSizeType:Response_Data;
616 }
617 cache_entry.Tokens := cache_entry.Tokens - 1;
618 }
619 }
620 }
621
// Answer an external GETX: transfer ownership (DATA_OWNER) plus every token
// we hold to the requestor.
622 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
623 assert(is_valid(cache_entry));
624 peek(requestNetwork_in, RequestMsg) {
625 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
626 out_msg.addr := address;
627 out_msg.Type := CoherenceResponseType:DATA_OWNER;
628 out_msg.Sender := machineID;
629 out_msg.Destination.add(in_msg.Requestor);
630 assert(cache_entry.Tokens >= 1);
631 out_msg.Tokens := cache_entry.Tokens;
632 out_msg.DataBlk := cache_entry.DataBlk;
633 out_msg.Dirty := cache_entry.Dirty;
634 out_msg.MessageSize := MessageSizeType:Response_Data;
635 }
636 }
637 cache_entry.Tokens := 0;
638 }
639
// Yield all held tokens (no data) to the current persistent-request starver.
640 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
641 assert(is_valid(cache_entry));
642 if (cache_entry.Tokens > 0) {
643 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
644 out_msg.addr := address;
645 out_msg.Type := CoherenceResponseType:ACK;
646 out_msg.Sender := machineID;
647 out_msg.Destination.add(persistentTable.findSmallest(address));
648 assert(cache_entry.Tokens >= 1);
649 out_msg.Tokens := cache_entry.Tokens;
650 out_msg.MessageSize := MessageSizeType:Response_Control;
651 }
652 }
653 cache_entry.Tokens := 0;
654 }
655
// Yield ownership: send data plus every held token to the starver.
656 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
657 assert(is_valid(cache_entry));
658 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
659 out_msg.addr := address;
660 out_msg.Type := CoherenceResponseType:DATA_OWNER;
661 out_msg.Sender := machineID;
662 out_msg.Destination.add(persistentTable.findSmallest(address));
663 assert(cache_entry.Tokens >= 1);
664 out_msg.Tokens := cache_entry.Tokens;
665 out_msg.DataBlk := cache_entry.DataBlk;
666 out_msg.Dirty := cache_entry.Dirty;
667 out_msg.MessageSize := MessageSizeType:Response_Data;
668 }
669 cache_entry.Tokens := 0;
670 }
671
// Serve a persistent GETS while staying a sharer: give the starver all
// tokens except one, which we retain (count unconditionally becomes 1).
672 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
673 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
674 assert(is_valid(cache_entry));
675 assert(cache_entry.Tokens > 0);
676 if (cache_entry.Tokens > 1) {
677 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
678 out_msg.addr := address;
679 out_msg.Type := CoherenceResponseType:ACK;
680 out_msg.Sender := machineID;
681 out_msg.Destination.add(persistentTable.findSmallest(address));
682 assert(cache_entry.Tokens >= 1);
683 out_msg.Tokens := cache_entry.Tokens - 1;
684 out_msg.MessageSize := MessageSizeType:Response_Control;
685 }
686 }
687 cache_entry.Tokens := 1;
688 }
689
// Owner variant of "all but one": pass data, ownership, and all tokens but
// one to the starver; requires holding strictly more than the owner majority.
690 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") {
691 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
692 assert(is_valid(cache_entry));
693 assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
694 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
695 out_msg.addr := address;
696 out_msg.Type := CoherenceResponseType:DATA_OWNER;
697 out_msg.Sender := machineID;
698 out_msg.Destination.add(persistentTable.findSmallest(address));
699 out_msg.Tokens := cache_entry.Tokens - 1;
700 out_msg.DataBlk := cache_entry.DataBlk;
701 out_msg.Dirty := cache_entry.Dirty;
702 out_msg.MessageSize := MessageSizeType:Response_Data;
703 }
704 cache_entry.Tokens := 1;
705 }
706
// We hold exactly the owner token bundle (max/2 + 1): cannot keep one back,
// so transfer data and the entire bundle to the starver.
707 action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") {
708 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
709 assert(is_valid(cache_entry));
710 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
711 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
712 out_msg.addr := address;
713 out_msg.Type := CoherenceResponseType:DATA_OWNER;
714 out_msg.Sender := machineID;
715 out_msg.Destination.add(persistentTable.findSmallest(address));
716 out_msg.Tokens := cache_entry.Tokens;
717 out_msg.DataBlk := cache_entry.DataBlk;
718 out_msg.Dirty := cache_entry.Dirty;
719 out_msg.MessageSize := MessageSizeType:Response_Data;
720 }
721 cache_entry.Tokens := 0;
722 }
723
724
725
// Forward an incoming response verbatim to the starver instead of absorbing
// it locally (block is locked by a persistent request).
726 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
727 // assert(persistentTable.isLocked(address));
728 peek(responseNetwork_in, ResponseMsg) {
729 // FIXME, should use a 3rd vnet in some cases
730 enqueue(responseNetwork_out, ResponseMsg, 1) {
731 out_msg.addr := address;
732 out_msg.Type := in_msg.Type;
733 out_msg.Sender := machineID;
734 out_msg.Destination.add(persistentTable.findSmallest(address));
735 out_msg.Tokens := in_msg.Tokens;
736 out_msg.DataBlk := in_msg.DataBlk;
737 out_msg.Dirty := in_msg.Dirty;
738 out_msg.MessageSize := in_msg.MessageSize;
739 }
740 }
741 }
742
// Forward a shared writeback to the starver, rewriting the writeback type to
// the response type the starver expects (data => DATA_SHARED, tokens => ACK).
743 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
744 //assert(persistentTable.isLocked(address));
745 peek(responseNetwork_in, ResponseMsg) {
746 // FIXME, should use a 3rd vnet in some cases
747 enqueue(responseNetwork_out, ResponseMsg, 1) {
748 out_msg.addr := address;
749 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
750 out_msg.Type := CoherenceResponseType:DATA_SHARED;
751 } else {
752 assert(in_msg.Tokens < (max_tokens() / 2));
753 out_msg.Type := CoherenceResponseType:ACK;
754 }
755 out_msg.Sender := machineID;
756 out_msg.Destination.add(persistentTable.findSmallest(address));
757 out_msg.Tokens := in_msg.Tokens;
758 out_msg.DataBlk := in_msg.DataBlk;
759 out_msg.Dirty := in_msg.Dirty;
760 out_msg.MessageSize := in_msg.MessageSize;
761 }
762 }
763 }
764
// Forward an owned writeback to the starver as a DATA_OWNER response,
// transferring ownership along with the tokens.
765 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
766 // assert(persistentTable.isLocked(address));
767 peek(responseNetwork_in, ResponseMsg) {
768 // FIXME, should use a 3rd vnet in some cases
769 enqueue(responseNetwork_out, ResponseMsg, 1) {
770 out_msg.addr := address;
771 out_msg.Type := CoherenceResponseType:DATA_OWNER;
772 out_msg.Sender := machineID;
773 out_msg.Destination.add(persistentTable.findSmallest(address));
774 out_msg.Tokens := in_msg.Tokens;
775 out_msg.DataBlk := in_msg.DataBlk;
776 out_msg.Dirty := in_msg.Dirty;
777 out_msg.MessageSize := in_msg.MessageSize;
778 }
779 }
780 }
781
782
// A writeback (or hint) from an L1 means it no longer shares the block:
// remove it from the local sharer filter.
783 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
784 peek(responseNetwork_in, ResponseMsg) {
785 removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
786 }
787 }
788
// Forward an external transient request to all local L1s (minus the
// requestor).  When filtering is on, a first-try request (RetryNum == 0)
// with no recorded local sharers is dropped instead of broadcast.
789 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
790 peek(requestNetwork_in, RequestMsg) {
791 if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
792 //profile_filter_action(1);
793 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
794 in_msg.RetryNum);
795 }
796 else {
797 enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
798 out_msg.addr := in_msg.addr;
799 out_msg.Requestor := in_msg.Requestor;
800
801 //
802 // Currently assuming only one chip so all L1s are local
803 //
804 //out_msg.Destination := getLocalL1IDs(machineID);
805 out_msg.Destination.broadcast(MachineType:L1Cache);
806 out_msg.Destination.remove(in_msg.Requestor);
807
808 out_msg.Type := in_msg.Type;
809 out_msg.isLocal := false;
810 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
811 out_msg.AccessMode := in_msg.AccessMode;
812 out_msg.Prefetch := in_msg.Prefetch;
813 }
814 //profile_filter_action(0);
815 }
816 }
817 }
818
// L2 hit for a local L1 GETS: send clean shared data with exactly one token.
819 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
820 peek(L1requestNetwork_in, RequestMsg) {
821 assert(is_valid(cache_entry));
822 assert(cache_entry.Tokens > 0);
823 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
824 out_msg.addr := address;
825 out_msg.Type := CoherenceResponseType:DATA_SHARED;
826 out_msg.Sender := machineID;
827 out_msg.Destination.add(in_msg.Requestor);
828 out_msg.DataBlk := cache_entry.DataBlk;
829 out_msg.Dirty := false;
830 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
831 out_msg.Tokens := 1;
832 }
833 cache_entry.Tokens := cache_entry.Tokens - 1;
834 }
835 }
836
// L2 hit while holding exactly the owner bundle (max/2 + 1): pass ownership
// and the whole bundle to the requesting L1.
837 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
838 peek(L1requestNetwork_in, RequestMsg) {
839 assert(is_valid(cache_entry));
840 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
841 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
842 out_msg.addr := address;
843 out_msg.Type := CoherenceResponseType:DATA_OWNER;
844 out_msg.Sender := machineID;
845 out_msg.Destination.add(in_msg.Requestor);
846 out_msg.DataBlk := cache_entry.DataBlk;
847 out_msg.Dirty := cache_entry.Dirty;
848 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
849 out_msg.Tokens := cache_entry.Tokens;
850 }
851 cache_entry.Tokens := 0;
852 }
853 }
854
// L2 hit for a local L1 GETX: transfer data, ownership, and every token
// this bank holds to the requesting L1.
855 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
856 peek(L1requestNetwork_in, RequestMsg) {
857 assert(is_valid(cache_entry));
858 // assert(cache_entry.Tokens == max_tokens());
859 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
860 out_msg.addr := address;
861 out_msg.Type := CoherenceResponseType:DATA_OWNER;
862 out_msg.Sender := machineID;
863 out_msg.Destination.add(in_msg.Requestor);
864 out_msg.DataBlk := cache_entry.DataBlk;
865 out_msg.Dirty := cache_entry.Dirty;
866 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
867 //out_msg.Tokens := max_tokens();
868 out_msg.Tokens := cache_entry.Tokens;
869 }
870 cache_entry.Tokens := 0;
871 }
872 }
873
// Queue-pop actions: consume the message at the head of each in_port after
// the transition that handled it completes.
874 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
875 persistentNetwork_in.dequeue(clockEdge());
876 }
877
878 action(m_popRequestQueue, "m", desc="Pop request queue.") {
879 requestNetwork_in.dequeue(clockEdge());
880 }
881
882 action(n_popResponseQueue, "n", desc="Pop response queue") {
883 responseNetwork_in.dequeue(clockEdge());
884 }
885
886 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
887 L1requestNetwork_in.dequeue(clockEdge());
888 }
889
890
  // Fold the token count carried by the incoming response into this entry's
  // count.  Owner-carrying messages (DATA_OWNER / WB_OWNED) can also set the
  // dirty bit here; the inline comment explains why that is not done in
  // u_writeDataToCache instead.
  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);  // a token-bearing response must carry >= 1
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }
904
  // Record the requesting L1 in the local sharing filter: a GETX makes it
  // the new writer, a GETS adds it as a sharer.  Requests from machines
  // other than local L1 caches are ignored.
  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }
916
  // Clear the exclusive bit for this address in the sharing filter, if the
  // filter has an entry for it.
  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  // Promote the line to MRU -- only for requests from a local L1 that hit a
  // valid entry; other requestors do not touch replacement state.
  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cache.setMRU(address);
      }
    }
  }
929
  // Give up every token collected so far as a data-less ACK to the requestor
  // on the global request network.  If no tokens are held, nothing is sent
  // and the final zeroing is a no-op.
  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;  // all of them
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;  // safe even when we sent nothing (already 0)
  }
947
  // Same as t_sendAckWithCollectedTokens, but the requestor is taken from
  // the LOCAL L1 request queue rather than the global request network.
  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;  // all of them
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;  // safe even when we sent nothing (already 0)
  }
965
  // Install the data block carried by the incoming response.  The dirty bit
  // is sticky: it may be set by the message but is never cleared here.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if ((cache_entry.Dirty == false) && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }
975
  // Allocate a fresh L2 entry for this address and make it the transition's
  // active cache_entry.
  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  // Free this address's L2 entry and drop the active cache_entry handle.
  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  // Bump the demand-miss statistic for this cache.
  action(uu_profileMiss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  // Bump the demand-hit statistic for this cache.
  action(uu_profileHit, "\uh", desc="Profile the demand hit") {
    ++L2cache.demand_hits;
  }
992
  // Sanity check: data carried by an incoming response must match what we
  // already cached.  Token-only messages (ACK, WB_TOKENS) carry no data
  // block and are skipped.
  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }
1002
1003
1004 //*****************************************************
1005 // TRANSITIONS
1006 //*****************************************************
1007
  // An L1 hints that it invalidated its copy: only the sharing filter needs
  // updating, in any state.
  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {

    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // This node's own persistent lock/unlock arriving in an unlocked state is
  // simply consumed.
  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }
1017
1018
  // Transitions from NP (not present: no entry allocated for this address)

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Local L1 miss with nothing cached here: broadcast the request globally
  // and note the requestor in the sharing filter.
  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // No entry exists to absorb these responses, so bounce them onward.
  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  // L1 writebacks while we are not present: allocate an entry and absorb
  // the data/tokens; the next state mirrors what the L1 gave up.
  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Tokens only, no data -> I (entry valid but data not usable).
  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  // A persistent request locks the address; with nothing held here we just
  // remember the lock.
  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }
1078
  // Transitions from Idle
  // (entry allocated; may hold tokens but has no usable data)

  // Local GETS: forward globally, and pass along any tokens we collected so
  // the requestor can make progress.
  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  // Remote transient request: hand over collected tokens and let local
  // sharers see the request too.
  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Persistent request: yield tokens to the starver and lock the address.
  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Data arrivals upgrade the state according to what the message carries.
  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Token-only writeback: accumulate tokens, stay in I (still no data).
  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }
1167
  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  // Remote GETX takes our tokens; we fall back to I.
  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  // Remote GETS: we keep our tokens, just forward to local sharers.
  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  // Persistent GETS: retain a single token so this node can stay a sharer
  // while the address is locked.
  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // We already hold valid data, so incoming data must match ours.
  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  // Gaining the owner token (or all tokens) upgrades the state.
  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Local GETX needs more tokens than we have: broadcast, hand over ours.
  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  // Local GETS hits: supply data and a token.
  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Giving away our only remaining token drops us to I.
  transition(S, L1_GETS_Last_Token, I) {

    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }
1277
  // Transitions from Owned

  // Owner must write the (possibly dirty) block back on replacement.
  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  // Persistent GETS: give the starver data and all but one token, keeping
  // this node a sharer while locked.
  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  // Starver needs the final token too, so give up everything.
  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  // Collecting the final outstanding tokens promotes the owner to M.
  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Handing the last token to an L1 transfers ownership out of this L2.
  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Local GETX: give the L1 everything we have, and broadcast to gather the
  // tokens held elsewhere.
  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }
1381
  // Transitions from M (all tokens held here)

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  // Local GETS: share one token with the L1, keep ownership -> O.
  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Local GETX: hand the L1 data plus every token -> I.
  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }
1417
1418
  // Transitions from locked states (I_L / S_L: a persistent request from
  // another node -- the "starver" -- holds a lock on this address)

  // While locked, responses are not absorbed here; redirect them to the
  // starver so it can complete.
  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  // L1 writebacks while locked are bounced too, but the sharing filter is
  // still updated locally.
  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  // Our own lock being released returns us to the unlocked counterpart.
  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  // Transient requests carry no obligation while locked; just consume them.
  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // S_L still holds one token with valid data, so local GETS can hit.
  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  // Already locked for GETS; further persistent GETS need no action here.
  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
1509 }