src/mem/protocol/MOESI_CMP_token-dir.sm
1 /*
2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 machine(Directory, "Token protocol")
30 : DirectoryMemory * directory;
31 int l2_select_num_bits;
32 Cycles directory_latency := 5;
33 bool distributed_persistent := "True";
34 Cycles fixed_timeout_latency := 100;
35 Cycles reissue_wakeup_latency := 10;
36 Cycles to_memory_controller_latency := 1;
37
38 // Message Queues from dir to other controllers / network
39 MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
40 vnet_type="response";
41
42 MessageBuffer * responseFromDir, network="To", virtual_network="4",
43 vnet_type="response";
44
45 MessageBuffer * persistentFromDir, network="To", virtual_network="3",
46 vnet_type="persistent";
47
48 MessageBuffer * requestFromDir, network="To", virtual_network="1",
49 vnet_type="request";
50
51 // Message Queues to dir from other controllers / network
52 MessageBuffer * responseToDir, network="From", virtual_network="4",
53 vnet_type="response";
54
55 MessageBuffer * persistentToDir, network="From", virtual_network="3",
56 vnet_type="persistent";
57
58 MessageBuffer * requestToDir, network="From", virtual_network="2",
59 vnet_type="request";
60
61 MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
62 vnet_type="request";
63
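  // Buffer filled by the memory controller interface with read data and
  // writeback acks; drained by the memQueue_in port below.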
64 MessageBuffer * responseFromMemory;
65 {
66 // STATES
67 state_declaration(State, desc="Directory states", default="Directory_State_O") {
68 // Base states
69 O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
70 NO, AccessPermission:Maybe_Stale, desc="Not Owner";
71 L, AccessPermission:Busy, desc="Locked";
72
73 // Memory wait states - can block all messages including persistent requests
74 O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
75 L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
76 L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
77 DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
78 DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
79 NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
80 O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
81 O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";
82
83 // DMA request transient states - must respond to persistent requests
84 O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
85 NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
86 NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";
87
88 // DMA request in progress - competing with a CPU persistent request
89 DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
90 DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
91
92 }
93
94 // Events
95 enumeration(Event, desc="Directory events") {
96 GETX, desc="A GETX arrives";
97 GETS, desc="A GETS arrives";
98 Lockdown, desc="A lockdown request arrives";
99 Unlockdown, desc="An un-lockdown request arrives";
100 Own_Lock_or_Unlock, desc="own lock or unlock";
101 Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
102     Data_Owner,            desc="Data arrives";
103 Data_All_Tokens, desc="Data and all tokens";
104 Ack_Owner, desc="Owner token arrived without data because it was clean";
105 Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
106 Tokens, desc="Tokens arrive";
107 Ack_All_Tokens, desc="All_Tokens arrive";
108 Request_Timeout, desc="A DMA request has timed out";
109
110 // Memory Controller
111 Memory_Data, desc="Fetched data from memory arrives";
112 Memory_Ack, desc="Writeback Ack from memory arrives";
113
114 // DMA requests
115 DMA_READ, desc="A DMA Read memory request";
116 DMA_WRITE, desc="A DMA Write memory request";
117 DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
118 }
119
120 // TYPES
121
122 // DirectoryEntry
123 structure(Entry, desc="...", interface="AbstractEntry") {
124 State DirectoryState, desc="Directory state";
125 int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
126
127 // The following state is provided to allow for bandwidth
128 // efficient directory-like operation. However all of this state
129 // is 'soft state' that does not need to be correct (as long as
130 // you're eventually willing to resort to broadcast.)
131
132 Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
133 Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
134 }
135
136 structure(PersistentTable, external="yes") {
137 void persistentRequestLock(Addr, MachineID, AccessType);
138 void persistentRequestUnlock(Addr, MachineID);
139 bool okToIssueStarving(Addr, MachineID);
140 MachineID findSmallest(Addr);
141 AccessType typeOfSmallest(Addr);
142 void markEntries(Addr);
143 bool isLocked(Addr);
144 int countStarvingForAddress(Addr);
145 int countReadStarvingForAddress(Addr);
146 }
147
148 // TBE entries for DMA requests
149 structure(TBE, desc="TBE entries for outstanding DMA requests") {
150 Addr PhysicalAddress, desc="physical address";
151 State TBEState, desc="Transient State";
152 DataBlock DataBlk, desc="Current view of the associated address range";
153 int Len, desc="...";
154 MachineID DmaRequestor, desc="DMA requestor";
155 bool WentPersistent, desc="Did the DMA request require a persistent request";
156 }
157
158 structure(TBETable, external="yes") {
159 TBE lookup(Addr);
160 void allocate(Addr);
161 void deallocate(Addr);
162 bool isPresent(Addr);
163 }
164
165 // ** OBJECTS **
166
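  // persistentTable tracks outstanding persistent (starvation) requests seen
  // on the persistent network; reissueTimerTable schedules Request_Timeout
  // events for DMA requests that have not yet collected their data or tokens.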
167 PersistentTable persistentTable;
168 TimerTable reissueTimerTable;
169
170 TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
171
172 bool starving, default="false";
173 int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
174
175 Tick clockEdge();
176 Tick cyclesToTicks(Cycles c);
177 void set_tbe(TBE b);
178 void unset_tbe();
179
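  // Look up the directory entry for addr, allocating one on first use.  A
  // newly allocated entry starts with the full token count (max_tokens()).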
180 Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
181 Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
182
183 if (is_valid(dir_entry)) {
184 return dir_entry;
185 }
186
187 dir_entry := static_cast(Entry, "pointer",
188 directory.allocate(addr, new Entry));
189 return dir_entry;
190 }
191
192 State getState(TBE tbe, Addr addr) {
193 if (is_valid(tbe)) {
194 return tbe.TBEState;
195 } else {
196 return getDirectoryEntry(addr).DirectoryState;
197 }
198 }
199
200 void setState(TBE tbe, Addr addr, State state) {
201 if (is_valid(tbe)) {
202 tbe.TBEState := state;
203 }
204 getDirectoryEntry(addr).DirectoryState := state;
205
206 if (state == State:L || state == State:DW_L || state == State:DR_L) {
207 assert(getDirectoryEntry(addr).Tokens == 0);
208 }
209
210 // We have one or zero owners
211 assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));
212
213 // Make sure the token count is in range
214 assert(getDirectoryEntry(addr).Tokens >= 0);
215 assert(getDirectoryEntry(addr).Tokens <= max_tokens());
216
217 if (state == State:O || state == State:O_W || state == State:O_DW) {
218 assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
219 // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
220 }
221 }
222
223 AccessPermission getAccessPermission(Addr addr) {
224 TBE tbe := TBEs[addr];
225 if(is_valid(tbe)) {
226 return Directory_State_to_permission(tbe.TBEState);
227 }
228
229 if (directory.isPresent(addr)) {
230 DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
231 return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
232 }
233
234 DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
235 return AccessPermission:NotPresent;
236 }
237
238 void setAccessPermission(Addr addr, State state) {
239 getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
240 }
241
242   bool okToIssueStarving(Addr addr, MachineID machineId) {
243     return persistentTable.okToIssueStarving(addr, machineId);
244 }
245
246 void markPersistentEntries(Addr addr) {
247 persistentTable.markEntries(addr);
248 }
249
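  // Functional accesses check an in-flight DMA TBE first and otherwise fall
  // through to the memory backing store.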
250 void functionalRead(Addr addr, Packet *pkt) {
251 TBE tbe := TBEs[addr];
252 if(is_valid(tbe)) {
253 testAndRead(addr, tbe.DataBlk, pkt);
254 } else {
255 functionalMemoryRead(pkt);
256 }
257 }
258
259 int functionalWrite(Addr addr, Packet *pkt) {
260 int num_functional_writes := 0;
261
262 TBE tbe := TBEs[addr];
263 if(is_valid(tbe)) {
264 num_functional_writes := num_functional_writes +
265 testAndWrite(addr, tbe.DataBlk, pkt);
266 }
267
268 num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
269 return num_functional_writes;
270 }
271
272 // ** OUT_PORTS **
273 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
274 out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
275 out_port(requestNetwork_out, RequestMsg, requestFromDir);
276 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
277
278 // ** IN_PORTS **
279 // off-chip memory request/response is done
280 in_port(memQueue_in, MemoryMsg, responseFromMemory) {
281 if (memQueue_in.isReady(clockEdge())) {
282 peek(memQueue_in, MemoryMsg) {
283 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
284 trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
285 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
286 trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
287 } else {
288 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
289 error("Invalid message");
290 }
291 }
292 }
293 }
294
295 // Reissue Timer
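  // Fires when a DMA-initiated request has waited out its scheduled delay,
  // triggering Request_Timeout so the request can be reissued or escalated
  // to a persistent request.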
296 in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
297 Tick current_time := clockEdge();
298 if (reissueTimerTable_in.isReady(current_time)) {
299 Addr addr := reissueTimerTable.nextAddress();
300 trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
301 }
302 }
303
304 in_port(responseNetwork_in, ResponseMsg, responseToDir) {
305 if (responseNetwork_in.isReady(clockEdge())) {
306 peek(responseNetwork_in, ResponseMsg) {
307 assert(in_msg.Destination.isElement(machineID));
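        // If the tokens already held here plus the incoming tokens add up to
        // max_tokens(), this response leaves the directory holding every
        // token for the block, so trigger the *_All_Tokens variants.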
308 if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
309 if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
310 (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
311 trigger(Event:Data_All_Tokens, in_msg.addr,
312 TBEs[in_msg.addr]);
313 } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
314 trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
315 TBEs[in_msg.addr]);
316 } else if (in_msg.Type == CoherenceResponseType:ACK) {
317 trigger(Event:Ack_All_Tokens, in_msg.addr,
318 TBEs[in_msg.addr]);
319 } else {
320 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
321 error("Invalid message");
322 }
323 } else {
324 if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
325 trigger(Event:Data_Owner, in_msg.addr,
326 TBEs[in_msg.addr]);
327 } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
328 (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
329 trigger(Event:Tokens, in_msg.addr,
330 TBEs[in_msg.addr]);
331 } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
332 trigger(Event:Ack_Owner, in_msg.addr,
333 TBEs[in_msg.addr]);
334 } else {
335 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
336 error("Invalid message");
337 }
338 }
339 }
340 }
341 }
342
343 in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
344 if (persistentNetwork_in.isReady(clockEdge())) {
345 peek(persistentNetwork_in, PersistentMsg) {
346 assert(in_msg.Destination.isElement(machineID));
347
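        // With distributed_persistent, every node (including the directory)
        // maintains its own copy of the persistent table, so lock/unlock
        // messages are applied locally before deciding how to react;
        // otherwise the directory reacts to the message type directly.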
348 if (distributed_persistent) {
349 // Apply the lockdown or unlockdown message to the table
350 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
351 persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
352 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
353 persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
354 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
355 persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
356 } else {
357 error("Invalid message");
358 }
359
360 // React to the message based on the current state of the table
361 if (persistentTable.isLocked(in_msg.addr)) {
362 if (persistentTable.findSmallest(in_msg.addr) == machineID) {
363 if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
364 trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
365 TBEs[in_msg.addr]);
366 } else {
367 trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
368 TBEs[in_msg.addr]);
369 }
370 } else {
371 // locked
372 trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
373 }
374 } else {
375 // unlocked
376 trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
377 }
378 }
379 else {
380 if (persistentTable.findSmallest(in_msg.addr) == machineID) {
381 if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
382 trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
383 TBEs[in_msg.addr]);
384 } else {
385 trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
386 TBEs[in_msg.addr]);
387 }
388 } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
389 // locked
390 trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
391 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
392 // locked
393 trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
394 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
395 // unlocked
396 trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
397 } else {
398 error("Invalid message");
399 }
400 }
401 }
402 }
403 }
404
405 in_port(requestNetwork_in, RequestMsg, requestToDir) {
406 if (requestNetwork_in.isReady(clockEdge())) {
407 peek(requestNetwork_in, RequestMsg) {
408 assert(in_msg.Destination.isElement(machineID));
409 if (in_msg.Type == CoherenceRequestType:GETS) {
410 trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
411 } else if (in_msg.Type == CoherenceRequestType:GETX) {
412 trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
413 } else {
414 error("Invalid message");
415 }
416 }
417 }
418 }
419
420 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
421 if (dmaRequestQueue_in.isReady(clockEdge())) {
422 peek(dmaRequestQueue_in, DMARequestMsg) {
423 if (in_msg.Type == DMARequestType:READ) {
424 trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
425 } else if (in_msg.Type == DMARequestType:WRITE) {
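        // A DMA write that finds the directory already holding every token
        // can be written straight to memory; otherwise tokens must first be
        // collected from the caches.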
426 if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
427 trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
428 TBEs[in_msg.LineAddress]);
429 } else {
430 trigger(Event:DMA_WRITE, in_msg.LineAddress,
431 TBEs[in_msg.LineAddress]);
432 }
433 } else {
434 error("Invalid message");
435 }
436 }
437 }
438 }
439
440 // Actions
441
442 action(a_sendTokens, "a", desc="Send tokens to requestor") {
443 // Only send a message if we have tokens to send
444 if (getDirectoryEntry(address).Tokens > 0) {
445 peek(requestNetwork_in, RequestMsg) {
446 enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
447 out_msg.addr := address;
448 out_msg.Type := CoherenceResponseType:ACK;
449 out_msg.Sender := machineID;
450 out_msg.Destination.add(in_msg.Requestor);
451 out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
452 out_msg.MessageSize := MessageSizeType:Response_Control;
453 }
454 }
455 getDirectoryEntry(address).Tokens := 0;
456 }
457 }
458
459   action(px_tryIssuingPersistentGETXRequest, "px", desc="Try to issue a persistent GETX request for a starving DMA write") {
460 if (okToIssueStarving(address, machineID) && (starving == false)) {
461 enqueue(persistentNetwork_out, PersistentMsg, 1) {
462 out_msg.addr := address;
463 out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
464 out_msg.Requestor := machineID;
465 out_msg.Destination.broadcast(MachineType:L1Cache);
466
467 //
468 // Currently the configuration system limits the system to only one
469 // chip. Therefore, if we assume one shared L2 cache, then only one
470         // pertinent L2 cache exists.
471 //
472 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
473
474 out_msg.Destination.add(mapAddressToRange(address,
475 MachineType:L2Cache, l2_select_low_bit,
476 l2_select_num_bits, intToID(0)));
477
478 out_msg.Destination.add(map_Address_to_Directory(address));
479 out_msg.MessageSize := MessageSizeType:Persistent_Control;
480 out_msg.Prefetch := PrefetchBit:No;
481 out_msg.AccessMode := RubyAccessMode:Supervisor;
482 }
483 markPersistentEntries(address);
484 starving := true;
485
486 tbe.WentPersistent := true;
487
488       // Do not schedule a wakeup; a persistent request will always complete
489 } else {
490
491 // We'd like to issue a persistent request, but are not allowed
492       // to issue a P.R. right now. Thus, we do not increment the
493 // IssueCount.
494
495 // Set a wakeup timer
496 reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
497 }
498 }
499
500 action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
501 peek(dmaRequestQueue_in, DMARequestMsg) {
502 //
503       // Assert that we only send the message if we don't already have all the tokens
504 //
505 assert(getDirectoryEntry(address).Tokens != max_tokens());
506 enqueue(requestNetwork_out, RequestMsg, 1) {
507 out_msg.addr := address;
508 out_msg.Type := CoherenceRequestType:GETX;
509 out_msg.Requestor := machineID;
510
511 //
512 // Since only one chip, assuming all L1 caches are local
513 //
514 out_msg.Destination.broadcast(MachineType:L1Cache);
515 out_msg.Destination.add(mapAddressToRange(address,
516 MachineType:L2Cache, l2_select_low_bit,
517 l2_select_num_bits, intToID(0)));
518
519 out_msg.RetryNum := 0;
520 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
521 out_msg.Prefetch := PrefetchBit:No;
522 out_msg.AccessMode := RubyAccessMode:Supervisor;
523 }
524 }
525 }
526
527   action(ps_tryIssuingPersistentGETSRequest, "ps", desc="Try to issue a persistent GETS request for a starving DMA read") {
528 if (okToIssueStarving(address, machineID) && (starving == false)) {
529 enqueue(persistentNetwork_out, PersistentMsg, 1) {
530 out_msg.addr := address;
531 out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
532 out_msg.Requestor := machineID;
533 out_msg.Destination.broadcast(MachineType:L1Cache);
534
535 //
536 // Currently the configuration system limits the system to only one
537 // chip. Therefore, if we assume one shared L2 cache, then only one
538         // pertinent L2 cache exists.
539 //
540 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
541
542 out_msg.Destination.add(mapAddressToRange(address,
543 MachineType:L2Cache, l2_select_low_bit,
544 l2_select_num_bits, intToID(0)));
545
546 out_msg.Destination.add(map_Address_to_Directory(address));
547 out_msg.MessageSize := MessageSizeType:Persistent_Control;
548 out_msg.Prefetch := PrefetchBit:No;
549 out_msg.AccessMode := RubyAccessMode:Supervisor;
550 }
551 markPersistentEntries(address);
552 starving := true;
553
554 tbe.WentPersistent := true;
555
556       // Do not schedule a wakeup; a persistent request will always complete
557 } else {
558
559 // We'd like to issue a persistent request, but are not allowed
560       // to issue a P.R. right now. Thus, we do not increment the
561 // IssueCount.
562
563 // Set a wakeup timer
564 reissueTimerTable.set(address, cyclesToTicks(reissue_wakeup_latency));
565 }
566 }
567
568 action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
569 peek(dmaRequestQueue_in, DMARequestMsg) {
570 enqueue(requestNetwork_out, RequestMsg, 1) {
571 out_msg.addr := address;
572 out_msg.Type := CoherenceRequestType:GETS;
573 out_msg.Requestor := machineID;
574
575 //
576 // Since only one chip, assuming all L1 caches are local
577 //
578 out_msg.Destination.broadcast(MachineType:L1Cache);
579 out_msg.Destination.add(mapAddressToRange(address,
580 MachineType:L2Cache, l2_select_low_bit,
581 l2_select_num_bits, intToID(0)));
582
583 out_msg.RetryNum := 0;
584 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
585 out_msg.Prefetch := PrefetchBit:No;
586 out_msg.AccessMode := RubyAccessMode:Supervisor;
587 }
588 }
589 }
590
591 action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
592 // Only send a message if we have tokens to send
593 if (getDirectoryEntry(address).Tokens > 0) {
594 enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
595 out_msg.addr := address;
596 out_msg.Type := CoherenceResponseType:ACK;
597 out_msg.Sender := machineID;
598 out_msg.Destination.add(persistentTable.findSmallest(address));
599 out_msg.Tokens := getDirectoryEntry(address).Tokens;
600 out_msg.MessageSize := MessageSizeType:Response_Control;
601 }
602 getDirectoryEntry(address).Tokens := 0;
603 }
604 }
605
606 action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
607 peek(memQueue_in, MemoryMsg) {
608 enqueue(responseNetwork_out, ResponseMsg, 1) {
609 out_msg.addr := address;
610 out_msg.Type := CoherenceResponseType:DATA_OWNER;
611 out_msg.Sender := machineID;
612 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
613 assert(getDirectoryEntry(address).Tokens > 0);
614 out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
615 out_msg.DataBlk := in_msg.DataBlk;
616 out_msg.Dirty := false;
617 out_msg.MessageSize := MessageSizeType:Response_Data;
618 }
619 }
620 getDirectoryEntry(address).Tokens := 0;
621 }
622
623 action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
624 peek(memQueue_in, MemoryMsg) {
625 enqueue(responseNetwork_out, ResponseMsg, 1) {
626 out_msg.addr := address;
627 out_msg.Type := CoherenceResponseType:DATA_OWNER;
628 out_msg.Sender := machineID;
629 out_msg.Destination.add(persistentTable.findSmallest(address));
630 assert(getDirectoryEntry(address).Tokens > 0);
631 out_msg.Tokens := getDirectoryEntry(address).Tokens;
632 out_msg.DataBlk := in_msg.DataBlk;
633 out_msg.Dirty := false;
634 out_msg.MessageSize := MessageSizeType:Response_Data;
635 }
636 }
637 getDirectoryEntry(address).Tokens := 0;
638 }
639
640 action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
641 enqueue(responseNetwork_out, ResponseMsg, 1) {
642 out_msg.addr := address;
643 out_msg.Type := CoherenceResponseType:DATA_OWNER;
644 out_msg.Sender := machineID;
645 out_msg.Destination.add(persistentTable.findSmallest(address));
646 assert(getDirectoryEntry(address).Tokens > 0);
647 out_msg.Tokens := getDirectoryEntry(address).Tokens;
648 out_msg.DataBlk := tbe.DataBlk;
649 out_msg.Dirty := false;
650 out_msg.MessageSize := MessageSizeType:Response_Data;
651 }
652 getDirectoryEntry(address).Tokens := 0;
653 }
654
655 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
656 peek(requestNetwork_in, RequestMsg) {
657 queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
658 }
659 }
660
661 action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
662 queueMemoryRead(persistentTable.findSmallest(address), address,
663 to_memory_controller_latency);
664 }
665
666 action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
667 peek(dmaRequestQueue_in, DMARequestMsg) {
668 queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
669 }
670 }
671
672 action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
673 peek(responseNetwork_in, ResponseMsg) {
674 queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
675 in_msg.DataBlk);
676 }
677 }
678
679 action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
680 queueMemoryWritePartial(tbe.DmaRequestor, address,
681 to_memory_controller_latency, tbe.DataBlk,
682 tbe.Len);
683 }
684
685 action(lr_queueMemoryDmaReadWriteback, "lr",
686 desc="Write DMA data from read to memory") {
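    // The owner's data that satisfied the DMA read is also written back to
    // memory so the directory's copy is up to date when it takes ownership.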
687 peek(responseNetwork_in, ResponseMsg) {
688 queueMemoryWrite(machineID, address, to_memory_controller_latency,
689 in_msg.DataBlk);
690 }
691 }
692
693 action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
694 peek(dmaRequestQueue_in, DMARequestMsg) {
695 TBEs.allocate(address);
696 set_tbe(TBEs[address]);
697 tbe.DataBlk := in_msg.DataBlk;
698 tbe.PhysicalAddress := in_msg.PhysicalAddress;
699 tbe.Len := in_msg.Len;
700 tbe.DmaRequestor := in_msg.Requestor;
701 tbe.WentPersistent := false;
702 }
703 }
704
705 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
706
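    // If this DMA request escalated to a persistent request, deactivate it
    // across all caches (and this directory) before freeing the TBE.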
707 if (tbe.WentPersistent) {
708 assert(starving);
709
710 enqueue(persistentNetwork_out, PersistentMsg, 1) {
711 out_msg.addr := address;
712 out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
713 out_msg.Requestor := machineID;
714 out_msg.Destination.broadcast(MachineType:L1Cache);
715
716 //
717 // Currently the configuration system limits the system to only one
718 // chip. Therefore, if we assume one shared L2 cache, then only one
719         // pertinent L2 cache exists.
720 //
721 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
722
723 out_msg.Destination.add(mapAddressToRange(address,
724 MachineType:L2Cache, l2_select_low_bit,
725 l2_select_num_bits, intToID(0)));
726
727 out_msg.Destination.add(map_Address_to_Directory(address));
728 out_msg.MessageSize := MessageSizeType:Persistent_Control;
729 }
730 starving := false;
731 }
732
733 TBEs.deallocate(address);
734 unset_tbe();
735 }
736
737 action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
738 peek(responseNetwork_in, ResponseMsg) {
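      // Merge: keep the incoming full data block, then re-apply the partial
      // DMA write payload (tbe.Len bytes at the DMA offset) on top of it.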
739 DataBlock DataBlk := tbe.DataBlk;
740 tbe.DataBlk := in_msg.DataBlk;
741 tbe.DataBlk.copyPartial(DataBlk, getOffset(tbe.PhysicalAddress),
742 tbe.Len);
743 }
744 }
745
746 action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
747 peek(responseNetwork_in, ResponseMsg) {
748 assert(in_msg.Tokens >= 1);
749 getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
750 }
751 }
752
753 action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
754 assert(getDirectoryEntry(address).Tokens == max_tokens());
755 }
756
757 action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
758 requestNetwork_in.dequeue(clockEdge());
759 }
760
761 action(z_recycleRequest, "z", desc="Recycle the request queue") {
762 requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
763 }
764
765 action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
766 responseNetwork_in.dequeue(clockEdge());
767 }
768
769 action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
770 responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
771 }
772
773 action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
774 persistentNetwork_in.dequeue(clockEdge());
775 }
776
777 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
778 dmaRequestQueue_in.dequeue(clockEdge());
779 }
780
781 action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
782 dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
783 }
784
785 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
786 memQueue_in.dequeue(clockEdge());
787 }
788
789 action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
790 peek(responseNetwork_in, ResponseMsg) {
791 enqueue(responseNetwork_out, ResponseMsg, 1) {
792 out_msg.addr := address;
793 out_msg.Type := in_msg.Type;
794 out_msg.Sender := machineID;
795 out_msg.Destination.add(persistentTable.findSmallest(address));
796 out_msg.Tokens := in_msg.Tokens;
797 out_msg.MessageSize := in_msg.MessageSize;
798 out_msg.DataBlk := in_msg.DataBlk;
799 out_msg.Dirty := in_msg.Dirty;
800 }
801 }
802 }
803
804 action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
805 //
806 // currently only support a fixed timeout latency
807 //
808 if (reissueTimerTable.isSet(address)) {
809 reissueTimerTable.unset(address);
810 reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
811 }
812 }
813
814 action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
815 //
816 // currently only support a fixed timeout latency
817 //
818 reissueTimerTable.set(address, cyclesToTicks(fixed_timeout_latency));
819 }
820
821 action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
822 if (reissueTimerTable.isSet(address)) {
823 reissueTimerTable.unset(address);
824 }
825 }
826
827 action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
828 peek(responseNetwork_in, ResponseMsg) {
829 assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
830 assert(in_msg.Dirty == false);
831 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
832
833 // Bounce the message, but "re-associate" the data and the owner
834 // token. In essence we're converting an ACK_OWNER message to a
835 // DATA_OWNER message, keeping the number of tokens the same.
836 enqueue(responseNetwork_out, ResponseMsg, 1) {
837 out_msg.addr := address;
838 out_msg.Type := CoherenceResponseType:DATA_OWNER;
839 out_msg.Sender := machineID;
840 out_msg.Destination.add(persistentTable.findSmallest(address));
841 out_msg.Tokens := in_msg.Tokens;
842 out_msg.Dirty := in_msg.Dirty;
843 out_msg.MessageSize := MessageSizeType:Response_Data;
844 }
845 }
846 }
847
848 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
849 enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
850 out_msg.PhysicalAddress := address;
851 out_msg.LineAddress := address;
852 out_msg.Type := DMAResponseType:ACK;
853 out_msg.Destination.add(tbe.DmaRequestor);
854 out_msg.MessageSize := MessageSizeType:Writeback_Control;
855 }
856 }
857
858 action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
859 peek(memQueue_in, MemoryMsg) {
860 enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
861 out_msg.PhysicalAddress := address;
862 out_msg.LineAddress := address;
863 out_msg.Type := DMAResponseType:DATA;
864 //
865 // we send the entire data block and rely on the dma controller to
866 // split it up if need be
867 //
868 out_msg.DataBlk := in_msg.DataBlk;
869 out_msg.Destination.add(tbe.DmaRequestor);
870 out_msg.MessageSize := MessageSizeType:Response_Data;
871 }
872 }
873 }
874
875 action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
876 peek(responseNetwork_in, ResponseMsg) {
877 enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
878 out_msg.PhysicalAddress := address;
879 out_msg.LineAddress := address;
880 out_msg.Type := DMAResponseType:DATA;
881 //
882 // we send the entire data block and rely on the dma controller to
883 // split it up if need be
884 //
885 out_msg.DataBlk := in_msg.DataBlk;
886 out_msg.Destination.add(tbe.DmaRequestor);
887 out_msg.MessageSize := MessageSizeType:Response_Data;
888 }
889 }
890 }
891
892 // TRANSITIONS
893
894 //
895 // Trans. from base state O
896 // the directory has valid data
897 //
898 transition(O, GETX, NO_W) {
899 qf_queueMemoryFetchRequest;
900 j_popIncomingRequestQueue;
901 }
902
903 transition(O, DMA_WRITE, O_DW) {
904 vd_allocateDmaRequestInTBE;
905 bw_broadcastWrite;
906 st_scheduleTimeout;
907 p_popDmaRequestQueue;
908 }
909
910 transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
911 vd_allocateDmaRequestInTBE;
912 ld_queueMemoryDmaWriteFromTbe;
913 p_popDmaRequestQueue;
914 }
915
916 transition(O, GETS, NO_W) {
917 qf_queueMemoryFetchRequest;
918 j_popIncomingRequestQueue;
919 }
920
921 transition(O, DMA_READ, O_DR_W) {
922 vd_allocateDmaRequestInTBE;
923 fd_memoryDma;
924 st_scheduleTimeout;
925 p_popDmaRequestQueue;
926 }
927
928 transition(O, Lockdown, L_O_W) {
929 qp_queueMemoryForPersistent;
930 l_popIncomingPersistentQueue;
931 }
932
933 transition(O, {Tokens, Ack_All_Tokens}) {
934 f_incrementTokens;
935 k_popIncomingResponseQueue;
936 }
937
938 transition(O, {Data_Owner, Data_All_Tokens}) {
939 f_incrementTokens;
940 k_popIncomingResponseQueue;
941 }
942
943 transition({O, NO}, Unlockdown) {
944 l_popIncomingPersistentQueue;
945 }
946
947 //
948 // transitioning to Owner, waiting for memory before DMA ack
949 // All other events should recycle/stall
950 //
951 transition(O_DR_W, Memory_Data, O) {
952 dm_sendMemoryDataToDma;
953 ut_unsetReissueTimer;
954 s_deallocateTBE;
955 l_popMemQueue;
956 }
957
958 //
959 // issued GETX for DMA write, waiting for all tokens
960 //
961 transition(O_DW, Request_Timeout) {
962 ut_unsetReissueTimer;
963 px_tryIssuingPersistentGETXRequest;
964 }
965
966 transition(O_DW, Tokens) {
967 f_incrementTokens;
968 k_popIncomingResponseQueue;
969 }
970
971 transition(O_DW, Data_Owner) {
972 f_incrementTokens;
973 rd_recordDataInTbe;
974 k_popIncomingResponseQueue;
975 }
976
977 transition(O_DW, Ack_Owner) {
978 f_incrementTokens;
979 k_popIncomingResponseQueue;
980 }
981
982 transition(O_DW, Lockdown, DW_L) {
983 de_sendTbeDataToStarver;
984 l_popIncomingPersistentQueue;
985 }
986
987 transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
988 f_incrementTokens;
989 rd_recordDataInTbe;
990 ld_queueMemoryDmaWriteFromTbe;
991 ut_unsetReissueTimer;
992 k_popIncomingResponseQueue;
993 }
994
995 transition(O_DW, Ack_All_Tokens, O_DW_W) {
996 f_incrementTokens;
997 ld_queueMemoryDmaWriteFromTbe;
998 ut_unsetReissueTimer;
999 k_popIncomingResponseQueue;
1000 }
1001
1002 transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
1003 f_incrementTokens;
1004 ld_queueMemoryDmaWriteFromTbe;
1005 ut_unsetReissueTimer;
1006 k_popIncomingResponseQueue;
1007 }
1008
1009 transition(O_DW_W, Memory_Ack, O) {
1010 da_sendDmaAck;
1011 s_deallocateTBE;
1012 l_popMemQueue;
1013 }
1014
1015 //
1016 // Trans. from NO
1017   // The directory does not have valid data, but may have some tokens
1018 //
1019 transition(NO, GETX) {
1020 a_sendTokens;
1021 j_popIncomingRequestQueue;
1022 }
1023
1024 transition(NO, DMA_WRITE, NO_DW) {
1025 vd_allocateDmaRequestInTBE;
1026 bw_broadcastWrite;
1027 st_scheduleTimeout;
1028 p_popDmaRequestQueue;
1029 }
1030
1031 transition(NO, GETS) {
1032 j_popIncomingRequestQueue;
1033 }
1034
1035 transition(NO, DMA_READ, NO_DR) {
1036 vd_allocateDmaRequestInTBE;
1037 br_broadcastRead;
1038 st_scheduleTimeout;
1039 p_popDmaRequestQueue;
1040 }
1041
1042 transition(NO, Lockdown, L) {
1043 aa_sendTokensToStarver;
1044 l_popIncomingPersistentQueue;
1045 }
1046
1047 transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
1048 f_incrementTokens;
1049 lq_queueMemoryWbRequest;
1050 k_popIncomingResponseQueue;
1051 }
1052
1053 transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
1054 f_incrementTokens;
1055 k_popIncomingResponseQueue;
1056 }
1057
1058 transition(NO, Tokens) {
1059 f_incrementTokens;
1060 k_popIncomingResponseQueue;
1061 }
1062
1063 transition(NO_W, Memory_Data, NO) {
1064 d_sendMemoryDataWithAllTokens;
1065 l_popMemQueue;
1066 }
1067
1068 // Trans. from NO_DW
1069 transition(NO_DW, Request_Timeout) {
1070 ut_unsetReissueTimer;
1071 px_tryIssuingPersistentGETXRequest;
1072 }
1073
1074 transition(NO_DW, Lockdown, DW_L) {
1075 aa_sendTokensToStarver;
1076 l_popIncomingPersistentQueue;
1077 }
1078
1079 // Note: NO_DW, Data_All_Tokens transition is combined with O_DW
1080   // Note: NO_DW should not receive the event Ack_All_Tokens because the
1081 // directory does not have valid data
1082
1083 transition(NO_DW, Data_Owner, O_DW) {
1084 f_incrementTokens;
1085 rd_recordDataInTbe;
1086 k_popIncomingResponseQueue;
1087 }
1088
1089 transition({NO_DW, NO_DR}, Tokens) {
1090 f_incrementTokens;
1091 k_popIncomingResponseQueue;
1092 }
1093
1094 // Trans. from NO_DR
1095 transition(NO_DR, Request_Timeout) {
1096 ut_unsetReissueTimer;
1097 ps_tryIssuingPersistentGETSRequest;
1098 }
1099
1100 transition(NO_DR, Lockdown, DR_L) {
1101 aa_sendTokensToStarver;
1102 l_popIncomingPersistentQueue;
1103 }
1104
1105 transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
1106 f_incrementTokens;
1107 dd_sendDmaData;
1108 lr_queueMemoryDmaReadWriteback;
1109 ut_unsetReissueTimer;
1110 s_deallocateTBE;
1111 k_popIncomingResponseQueue;
1112 }
1113
1114 // Trans. from L
1115 transition({L, DW_L, DR_L}, {GETX, GETS}) {
1116 j_popIncomingRequestQueue;
1117 }
1118
1119 transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
1120 l_popIncomingPersistentQueue;
1121 }
1122
1123 //
1124 // Received data for lockdown blocks
1125 // For blocks with outstanding dma requests to them
1126 // ...we could change this to write the data to memory and send it cleanly
1127 // ...we could also proactively complete our DMA requests
1128 // However, to keep my mind from spinning out-of-control, we won't for now :)
1129 //
1130 transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
1131 r_bounceResponse;
1132 k_popIncomingResponseQueue;
1133 }
1134
1135 transition({DW_L, DR_L, L}, Tokens) {
1136 r_bounceResponse;
1137 k_popIncomingResponseQueue;
1138 }
1139
1140 transition({DW_L, DR_L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
1141 bd_bounceDatalessOwnerToken;
1142 k_popIncomingResponseQueue;
1143 }
1144
1145 transition(L, {Ack_Owner_All_Tokens, Ack_Owner}, L_O_W) {
1146 f_incrementTokens;
1147 qp_queueMemoryForPersistent;
1148 k_popIncomingResponseQueue;
1149 }
1150
1151 transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
1152 l_popIncomingPersistentQueue;
1153 }
1154
1155 transition(L, Own_Lock_or_Unlock_Tokens, O) {
1156 l_popIncomingPersistentQueue;
1157 }
1158
1159 transition({L_NO_W, L_O_W}, Memory_Data, L) {
1160 dd_sendMemDataToStarver;
1161 l_popMemQueue;
1162 }
1163
1164 transition(L_O_W, Memory_Ack) {
1165 qp_queueMemoryForPersistent;
1166 l_popMemQueue;
1167 }
1168
1169 transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
1170 l_popIncomingPersistentQueue;
1171 }
1172
1173 transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
1174 l_popIncomingPersistentQueue;
1175 }
1176
1177 transition(DR_L_W, Memory_Data, DR_L) {
1178 dd_sendMemDataToStarver;
1179 l_popMemQueue;
1180 }
1181
1182 transition(DW_L_W, Memory_Ack, L) {
1183 aat_assertAllTokens;
1184 da_sendDmaAck;
1185 s_deallocateTBE;
1186 dd_sendMemDataToStarver;
1187 l_popMemQueue;
1188 }
1189
1190 transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
1191 l_popIncomingPersistentQueue;
1192 }
1193
1194 transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
1195 l_popIncomingPersistentQueue;
1196 }
1197
1198 transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
1199 l_popIncomingPersistentQueue;
1200 }
1201
1202 transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
1203 ut_unsetReissueTimer;
1204 px_tryIssuingPersistentGETXRequest;
1205 }
1206
1207 transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
1208 l_popIncomingPersistentQueue;
1209 }
1210
1211 transition(DR_L, Request_Timeout) {
1212 ut_unsetReissueTimer;
1213 ps_tryIssuingPersistentGETSRequest;
1214 }
1215
1216 //
1217   // The O_W + Memory_Data > O transition is confusing, but it can happen if a
1218   // persistent request is issued and resolves before memory returns with data
1219 //
1220 transition(O_W, {Memory_Ack, Memory_Data}, O) {
1221 l_popMemQueue;
1222 }
1223
1224 transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
1225 l_popIncomingPersistentQueue;
1226 }
1227
1228 // Blocked states
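  // Requests, DMA requests, and responses that arrive while the directory is
  // waiting on memory (or on a competing persistent/DMA request) are recycled
  // so they retry once the transient state resolves.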
1229 transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
1230 z_recycleRequest;
1231 }
1232
1233 transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
1234 y_recycleDmaRequestQueue;
1235 }
1236
1237 transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
1238 kz_recycleResponse;
1239 }
1240
1241 //
1242 // If we receive a request timeout while waiting for memory, it is likely that
1243   // the request will be satisfied and issuing a persistent request will do us
1244 // no good. Just wait.
1245 //
1246 transition({O_DW_W, O_DR_W}, Request_Timeout) {
1247 rs_resetScheduleTimeout;
1248 }
1249
1250 transition(NO_W, Lockdown, L_NO_W) {
1251 l_popIncomingPersistentQueue;
1252 }
1253
1254 transition(O_W, Lockdown, L_O_W) {
1255 l_popIncomingPersistentQueue;
1256 }
1257
1258 transition(O_DR_W, Lockdown, DR_L_W) {
1259 l_popIncomingPersistentQueue;
1260 }
1261
1262 transition(O_DW_W, Lockdown, DW_L_W) {
1263 l_popIncomingPersistentQueue;
1264 }
1265
1266 transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
1267 l_popIncomingPersistentQueue;
1268 }
1269 }