/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L2Cache, "Token protocol")
 : CacheMemory * L2cache;
   int N_tokens;
   Cycles l2_request_latency := 5;
   Cycles l2_response_latency := 5;
   bool filtering_enabled := "True";

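   // N_tokens presumably bounds how many tokens accompany a single
   // shared-data response; see d_sendDataWithTokens below, which hands out
   // N_tokens at a time while a comfortable surplus remains.
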
   // L2 BANK QUEUES
   // From local bank of L2 cache TO the network

   // this L2 bank -> a local L1 || mod-directory
   MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
     ordered="false", vnet_type="response";
   // this L2 bank -> mod-directory
   MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
     ordered="false", vnet_type="request";
   // this L2 bank -> a local L1
   MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
     ordered="false", vnet_type="request";


   // FROM the network to this local bank of L2 cache

   // a local L1 || mod-directory -> this L2 bank
   MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
     ordered="false", vnet_type="response";
   MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
     ordered="true", vnet_type="persistent";
   // mod-directory -> this L2 bank
   MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
     ordered="false", vnet_type="request";
   // a local L1 -> this L2 bank
   MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
     ordered="false", vnet_type="request";

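   // Note that only the persistent vnet (3) is ordered; token counting is
   // what keeps the protocol correct on the unordered request and response
   // networks, so no ordering is assumed there.
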
{
  // STATES
  state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not Present";
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
    O, AccessPermission:Read_Only, desc="Owned, not present in any local L1s";
    M, AccessPermission:Read_Write, desc="Modified, not present in any local L1s";

    // Locked states
    I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
    S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request, last token";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor, last token";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 with all the tokens";
    Writeback_Owned, desc="Received a writeback of owned data from L1";


    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Persistent_GETS_Last_Token, desc="Another processor has priority to read, last token";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }

  structure(PerfectCacheMemory, external="yes") {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }

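  // The persistent table records active persistent (starvation-avoidance)
  // requests; findSmallest() picks the starver to service, presumably the
  // requestor with the smallest MachineID.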
  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    return cache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  int getTokens(Entry cache_entry) {
    if (is_valid(cache_entry)) {
      return cache_entry.Tokens;
    } else {
      return 0;
    }
  }

  State getState(Entry cache_entry, Address addr) {
    if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else if (persistentTable.isLocked(addr)) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

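  // The token counts checked below appear to encode ownership in the count
  // itself: holding the owner token adds max_tokens()/2, so a legal count
  // never equals exactly max_tokens()/2, an owner always sits above that
  // threshold, and "last token" means a count of 1 (one plain token) or
  // max_tokens()/2 + 1 (just the owner token).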
  void setState(Entry cache_entry, Address addr, State state) {

    if (is_valid(cache_entry)) {
      // Make sure the token count is in range
      assert(cache_entry.Tokens >= 0);
      assert(cache_entry.Tokens <= max_tokens());
      assert(cache_entry.Tokens != (max_tokens() / 2));

      // Make sure we have no tokens in the locked state I_L
      if (state == State:I_L) {
        assert(cache_entry.Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M) {
        assert(cache_entry.Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(cache_entry.Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S) {
        assert(cache_entry.Tokens > 0);
      }

      // You have more than half the tokens in O-like states
      if (state == State:O) {
        assert(cache_entry.Tokens > (max_tokens() / 2));
      }

      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L2Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L2Cache_State_to_permission(state));
    }
  }

  void removeSharer(Address addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].Sharers.count() > 0) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].exclusive) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].exclusive := false;
    }
  }

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);



  // ** IN_PORTS **

  // Persistent Network
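  // Lock/unlock messages update the persistent table first; the trigger then
  // depends on whether the address is (still) locked and on whether this bank
  // holds the last readable token for the starver.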
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Addr)) {

          if (persistentTable.typeOfSmallest(in_msg.Addr) == AccessType:Read) {
            if (getTokens(cache_entry) == 1 ||
                getTokens(cache_entry) == (max_tokens() / 2) + 1) {
              trigger(Event:Persistent_GETS_Last_Token, in_msg.Addr,
                      cache_entry);
            } else {
              trigger(Event:Persistent_GETS, in_msg.Addr, cache_entry);
            }
          } else {
            trigger(Event:Persistent_GETX, in_msg.Addr, cache_entry);
          }
        }
        else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Addr, cache_entry);
        }
      }
    }
  }


  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Addr,
                    cache_entry);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.Addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Addr, cache_entry);
          }
          else {
            trigger(Event:L1_GETS, in_msg.Addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }


  // Response Network
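  // An incoming response completes the token set when the tokens it carries
  // plus the tokens already held equal max_tokens(); in that case the
  // *_All_Tokens variant of each event fires instead of the plain one.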
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Addr);

        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.Addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.Addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.Addr, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cache.cacheProbe(in_msg.Addr),
                      getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Addr, cache_entry);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
                     in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                assert( (getState(cache_entry, in_msg.Addr) != State:NP)
                        && (getState(cache_entry, in_msg.Addr) != State:I) );
                trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cache.cacheProbe(in_msg.Addr),
                      getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Addr, cache_entry);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }


  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally
      enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
        out_msg.Addr := in_msg.Addr;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action

  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens >= 1);
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens - 1;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 1;
  }

  action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }


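  // The gg_* actions below relay in-flight responses straight to the current
  // starver (persistentTable.findSmallest) while the address is locked,
  // without retaining any tokens or data in this bank.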
  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          assert(in_msg.Tokens < (max_tokens() / 2));
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }


  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Addr, machineIDToNodeID(in_msg.Sender));
    }
  }

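  // When filtering_enabled is set, a first-attempt external request
  // (RetryNum == 0) is dropped below if the local directory records no
  // sharers; retries are always forwarded, so filtering can delay a request
  // but not permanently hide a block.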
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) {
        //profile_filter_action(1);
        DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
                in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency) {
          out_msg.Addr := in_msg.Addr;
          out_msg.Requestor := in_msg.Requestor;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }

  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      cache_entry.Tokens := cache_entry.Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      // assert(cache_entry.Tokens == max_tokens());
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue.") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }


  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ((in_msg.Type == CoherenceResponseType:DATA_OWNER ||
           in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line") {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cache.setMRU(address);
      }
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if ((cache_entry.Dirty == false) && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  action(uu_profileHit, "\uh", desc="Profile the demand hit") {
    ++L2cache.demand_hits;
  }

  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

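  // I_L and S_L are entered while another node's persistent request has the
  // address locked; most traffic arriving in these states is bounced to the
  // starver rather than absorbed.
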
  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // WAIT FOR IT TO GO PERSISTENT
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }


  // Transitions from locked states

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}