
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */


machine(Directory, "Token protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int l2_select_num_bits,
   int directory_latency = 5,
   bool distributed_persistent = true,
   int fixed_timeout_latency = 100
{

  MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
  MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true";
  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false";

  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true";
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
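
  // Note: the persistent networks (virtual network 3) are ordered so that a
  // lockdown and its matching un-lockdown cannot be reordered in flight; the
  // distributed copies of the persistent table depend on observing them in
  // issue order.  The DMA networks (virtual networks 0 and 5) are likewise
  // ordered to keep each device's requests and responses in order.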

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, desc="Owner";
    NO, desc="Not Owner";
    L, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, desc="transitioning to Owner, waiting for memory write";
    L_O_W, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, desc="issued GETS for DMA read, CPU persistent request must complete first";

  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    bool okToIssueStarving(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
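
  // The persistent table arbitrates among starving requestors: findSmallest()
  // picks the active starver with the lowest machine ID for an address, so
  // exactly one persistent request is serviced at a time, while markEntries()
  // and okToIssueStarving() keep a node from jumping ahead of starvers that
  // were already marked.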

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
    DataBlock DataBlk, desc="The current view of system memory";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template_hack="<Directory_TBE>";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(Address addr, State state) {
    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }
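
  // Protocol-level token invariant (assumed, not checked here): for any block,
  // the tokens held by this directory, by the caches, and by in-flight
  // messages always sum to max_tokens().  The directory starts out holding
  // all of them (see the Entry.Tokens default above); a reader needs at least
  // one token plus data, while a writer must collect every token.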

  bool okToIssueStarving(Address addr, MachineID machID) {
    return persistentTable.okToIssueStarving(addr, machID);
  }

  void markPersistentEntries(Address addr) {
    persistentTable.markEntries(addr);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //
  // Memory buffer for memory controller to DIMM communication
  //
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Reissue Timer
  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
    if (reissueTimerTable_in.isReady()) {
      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress());
    }
  }
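
  // A ready timer means a broadcast DMA request has gone unanswered too long;
  // the resulting Request_Timeout event escalates it into a persistent request
  // via the px_/ps_ actions below.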

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.Address);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.Address);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }
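
  // Responses are classified by whether they complete the token count: when
  // the directory's tokens plus the message's tokens reach max_tokens(), the
  // *_All_Tokens flavor of the event fires so a pending DMA write can finish
  // in a single transition.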

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.Address)) {
            if (persistentTable.findSmallest(in_msg.Address) == machineID) {
              if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
              }
            } else {
              trigger(Event:Lockdown, in_msg.Address); // locked
            }
          } else {
            trigger(Event:Unlockdown, in_msg.Address); // unlocked
          }
        } else {
          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
            if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.Address);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            trigger(Event:Lockdown, in_msg.Address); // locked
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            trigger(Event:Lockdown, in_msg.Address); // locked
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            trigger(Event:Unlockdown, in_msg.Address); // unlocked
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }
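
  // With distributed_persistent (the default), every node keeps its own copy
  // of the persistent table and applies each lockdown/unlockdown locally;
  // otherwise the directory reacts to the raw message types alone.  In both
  // cases the Own_Lock_or_Unlock* events fire when this directory is itself
  // the highest-priority starver, which happens for DMA requests that went
  // persistent here.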

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
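
  // A DMA write that arrives while the directory already holds all tokens
  // (DMA_WRITE_All_Tokens) can skip the broadcast-and-timeout path and go
  // straight to memory.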

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
        enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
      markPersistentEntries(address);
      starving := true;

      TBEs[address].WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send a message if we don't already have all the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
      markPersistentEntries(address);
      starving := true;

      TBEs[address].WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := TBEs[address].DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := TBEs[address].DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
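
  // Worked example of the merge above, assuming 64-byte data blocks: an
  // 8-byte DMA write to byte offset 16 of the line first takes the full
  // 64-byte memory view from the TBE, then copyPartial overwrites only bytes
  // 16..23 with DmaDataBlk, leaving the rest of the line intact.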

  action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := TBEs[address].DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DmaDataBlk := in_msg.DataBlk;
      TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
      TBEs[address].Len := in_msg.Len;
      TBEs[address].DmaRequestor := in_msg.Requestor;
      TBEs[address].WentPersistent := false;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {

    if (TBEs[address].WentPersistent) {
      assert(starving == true);

      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
  }

  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      TBEs[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
    TBEs[address].DataBlk := getDirectoryEntry(address).DataBlk;
  }

  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(n_checkData, "n", desc="Check incoming clean data message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, fixed_timeout_latency);
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    reissueTimerTable.set(address, fixed_timeout_latency);
  }

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(TBEs[address].DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(TBEs[address].DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(TBEs[address].DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS

  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA ack
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: the NO_DW, Data_All_Tokens transition is combined with O_DW above
  // Note: NO_DW should not receive the event Ack_All_Tokens because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data -> O transition is confusing, but it can happen if a
  // persistent request is issued and resolved before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }
1233
1234 //
1235 // If we receive a request timeout while waiting for memory, it is likely that
1236 // the request will be satisfied and issuing a presistent request will do us
1237 // no good. Just wait.
1238 //
1239 transition({O_DW_W, O_DR_W}, Request_Timeout) {
1240 rs_resetScheduleTimeout;
1241 }
1242
1243 transition(NO_W, Lockdown, L_NO_W) {
1244 l_popIncomingPersistentQueue;
1245 }
1246
1247 transition(O_W, Lockdown, L_O_W) {
1248 l_popIncomingPersistentQueue;
1249 }
1250
1251 transition(O_DR_W, Lockdown, DR_L_W) {
1252 l_popIncomingPersistentQueue;
1253 }
1254
1255 transition(O_DW_W, Lockdown, DW_L_W) {
1256 l_popIncomingPersistentQueue;
1257 }
1258
1259 transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
1260 l_popIncomingPersistentQueue;
1261 }
1262 }