
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */

machine(Directory, "Token protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int l2_select_num_bits,
   int directory_latency = 5,
   bool distributed_persistent = true,
   int fixed_timeout_latency = 100
{

  MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
  MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true";
  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false";

  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true";
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
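
  // Note that the persistent and DMA virtual networks are the ordered ones:
  // the persistent (starvation-avoidance) protocol depends on every node
  // observing activate/deactivate messages for an address in a consistent
  // order, and the DMA request/response queues assume in-order delivery.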

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, desc="Owner";
    NO, desc="Not Owner";
    L, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, desc="transitioning to Owner, waiting for memory write";
    L_O_W, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, desc="issued GETS for DMA read, CPU persistent request must complete first";
  }
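
  // Naming convention: a trailing "_W" marks a state that is waiting on the
  // memory controller, an "L" component marks a state in which the block is
  // locked down by a persistent request, and "DR"/"DW" mark an outstanding
  // DMA read or write.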

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

  external_type(PersistentTable) {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    bool okToIssueStarving(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
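
  // The persistent table records the set of outstanding persistent
  // (starvation) requests for each address; findSmallest() encodes the
  // arbitration policy: the starving requestor with the smallest machine ID
  // wins the lock.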

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
    DataBlock DataBlk, desc="The current view of system memory";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request?";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template_hack="<Directory_TBE>";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }

  bool okToIssueStarving(Address addr, MachineID machineID) {
    return persistentTable.okToIssueStarving(addr, machineID);
  }

  void markPersistentEntries(Address addr) {
    persistentTable.markEntries(addr);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //
  // Memory buffer for memory controller to DIMM communication
  //
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
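
  // Each in_port peeks at the message at the head of its queue and calls
  // trigger(), which dispatches to the transition defined below for the
  // block's current state and this event; blocked states deal with messages
  // they cannot yet consume by explicitly recycling them (see the
  // transitions at the end of the file).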

  // Reissue Timer
  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
    if (reissueTimerTable_in.isReady()) {
      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
              TBEs[reissueTimerTable.readyAddress()]);
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }
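
  // A response whose token count brings the directory up to max_tokens() is
  // promoted to the corresponding "All_Tokens" event, since holding every
  // token is what lets the directory (or a pending DMA write) claim
  // exclusive access to the block.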

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.Address)) {
            if (persistentTable.findSmallest(in_msg.Address) == machineID) {
              if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                        TBEs[in_msg.Address]);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                        TBEs[in_msg.Address]);
              }
            } else {
              // locked
              trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
            }
          } else {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          }
        } else {
          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
            if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                      TBEs[in_msg.Address]);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                      TBEs[in_msg.Address]);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }
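
  // With distributed_persistent (the default) every node, the directory
  // included, mirrors the full persistent table, so the reaction above is
  // driven by the table state after applying this message; in the
  // centralized flavor the directory reacts to the message type directly.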

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
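
  // A DMA write that arrives while the directory already holds all the
  // tokens can skip the GETX broadcast and go straight to memory; otherwise
  // the directory must first collect the outstanding tokens from the caches.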

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") { // FIXME?
        enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) { // FIXME?
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but we are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }
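
  // The directory's DMA engine follows the same escalation path as the
  // processors: first a broadcast (transient) request guarded by a timeout,
  // then, only if the timeout fires, a persistent request that locks the
  // block everywhere until the data or all the tokens arrive.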

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send a message if we don't already have all the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since there is only one chip, assume all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but we are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since there is only one chip, assume all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := AccessModeType:SupervisorMode;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") { // FIXME?
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) { // FIXME?
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
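
  // copyPartial() overlays just Len bytes of the DMA payload, starting at
  // the block offset of the DMA's physical address, on top of the full
  // memory block, so a sub-block DMA write cannot clobber neighboring bytes.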

  action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaDataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.WentPersistent := false;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {

    if (tbe.WentPersistent) {
      assert(starving == true);

      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
    unset_tbe();
  }
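
  // A DMA request that had to go persistent must broadcast
  // DEACTIVATE_PERSISTENT on completion so that every node drops the lock
  // from its persistent table.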

  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
    tbe.DataBlk := getDirectoryEntry(address).DataBlk;
  }

  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(n_checkData, "n", desc="Check incoming clean data message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, fixed_timeout_latency);
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    reissueTimerTable.set(address, fixed_timeout_latency);
  }

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS
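
  // Each transition(initial_state, event[, next_state]) block lists, in
  // order, the actions defined above that run when that event arrives for a
  // block in that state; omitting the third argument leaves the state
  // unchanged.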

  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }
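
  // Both DMA write paths snapshot the directory's (clean) copy of the block
  // into the TBE first, so the partial DMA data can later be merged into a
  // complete block before it is written back.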

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA data
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: the NO_DW, Data_All_Tokens transition is combined with O_DW above
  // Note: NO_DW should not see the Ack_All_Tokens event because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }
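
  // A DMA read that collects dirty data both forwards it to the DMA
  // controller and writes it back to memory, so the block is clean again by
  // the time the directory settles in O.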

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data > O transition is confusing, but it can happen if a
  // persistent request is issued and resolves before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }
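
  // Recycling keeps the message queued and retries it later rather than
  // dropping it, so no request or response is lost while the block is
  // blocked waiting on memory or a lockdown.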

  //
  // If we receive a request timeout while waiting for memory, it is likely that
  // the request will be satisfied and issuing a persistent request will do us
  // no good. Just wait.
  //
  transition({O_DW_W, O_DR_W}, Request_Timeout) {
    rs_resetScheduleTimeout;
  }

  transition(NO_W, Lockdown, L_NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_W, Lockdown, L_O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DR_W, Lockdown, DR_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DW_W, Lockdown, DW_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }
}