mem-ruby: Replace SLICC queueMemory calls with enqueue
src/mem/ruby/protocol/MOESI_CMP_token-dir.sm
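
This file is shown as of a change that removes SLICC's implicit memory-controller interface: instead of calling the built-in queueMemoryRead/queueMemoryWrite helpers, controllers now enqueue a MemoryMsg on an explicit requestToMemory buffer and receive completions on responseFromMemory. A minimal sketch of the pattern, using names from this file (the old-style call is paraphrased from memory and may not match the removed code exactly):

    // old style: implicit call into the memory controller
    // queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);

    // new style, as used by the actions in this file
    enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
      out_msg.addr := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := in_msg.Requestor;
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.Len := 0;
    }
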
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "Token protocol")
 : DirectoryMemory * directory;
   int l2_select_num_bits;
   Cycles directory_latency := 5;
   bool distributed_persistent := "True";
   Cycles fixed_timeout_latency := 100;
   Cycles reissue_wakeup_latency := 10;
   Cycles to_memory_controller_latency := 1;

   // Message Queues from dir to other controllers / network
   MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
        vnet_type="response";

   MessageBuffer * responseFromDir, network="To", virtual_network="4",
        vnet_type="response";

   MessageBuffer * persistentFromDir, network="To", virtual_network="3",
        vnet_type="persistent";

   MessageBuffer * requestFromDir, network="To", virtual_network="1",
        vnet_type="request";

   // Message Queues to dir from other controllers / network
   MessageBuffer * responseToDir, network="From", virtual_network="4",
        vnet_type="response";

   MessageBuffer * persistentToDir, network="From", virtual_network="3",
        vnet_type="persistent";

   MessageBuffer * requestToDir, network="From", virtual_network="2",
        vnet_type="request";

   MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
        vnet_type="request";

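   // Explicit request/response path to this controller's memory interface.
   // These buffers replace the implicit queueMemoryRead/queueMemoryWrite
   // calls removed by this change: reads and writebacks are enqueued on
   // requestToMemory (via memQueue_out below) and complete via
   // responseFromMemory (via memQueue_in).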
   MessageBuffer * requestToMemory;
   MessageBuffer * responseFromMemory;
{
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
    NO, AccessPermission:Maybe_Stale, desc="Not Owner";
    L, AccessPermission:Busy, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
    L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry", main="false") {
    State DirectoryState, desc="Directory state";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

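  // External persistent-request table used for starvation avoidance.
  // findSmallest() deterministically returns the highest-priority
  // (lowest-ID) starving requestor for an address, so every controller
  // independently agrees on who is served first.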
  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Addr, MachineID, AccessType);
    void persistentRequestUnlock(Addr, MachineID);
    bool okToIssueStarving(Addr, MachineID);
    MachineID findSmallest(Addr);
    AccessType typeOfSmallest(Addr);
    void markEntries(Addr);
    bool isLocked(Addr);
    int countStarvingForAddress(Addr);
    int countReadStarvingForAddress(Addr);
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Addr PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Current view of the associated address range";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Tick clockEdge();
  Tick clockEdge(Cycles c);
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

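  // Allocate-on-miss lookup: the directory always has an entry for any
  // line it is asked about. A freshly allocated entry defaults to
  // max_tokens() (see Entry above), i.e. memory initially holds all the
  // tokens for the line.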
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }

  bool okToIssueStarving(Addr addr, MachineID machineID) {
    return persistentTable.okToIssueStarving(addr, machineID);
  }

  void markPersistentEntries(Addr addr) {
    persistentTable.markEntries(addr);
  }

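  // Functional accesses prefer an in-flight DMA copy in the TBE, if any,
  // and otherwise go straight to the backing memory.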
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
  out_port(memQueue_out, MemoryMsg, requestToMemory);

  // ** IN_PORTS **
  // off-chip memory request/response is done
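  // Responses from the memory controller arrive here: completed
  // MEMORY_READ requests trigger Memory_Data, completed MEMORY_WB
  // requests trigger Memory_Ack.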
  in_port(memQueue_in, MemoryMsg, responseFromMemory) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Reissue Timer
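  // Fires Request_Timeout for an outstanding DMA request that has not
  // collected its data/tokens in time, so the directory can retry or
  // escalate to a persistent request.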
  in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
    Tick current_time := clockEdge();
    if (reissueTimerTable_in.isReady(current_time)) {
      Addr addr := reissueTimerTable.nextAddress();
      trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady(clockEdge())) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.addr)) {
            if (persistentTable.findSmallest(in_msg.addr) == machineID) {
              if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
                        TBEs[in_msg.addr]);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
                        TBEs[in_msg.addr]);
              }
            } else {
              // locked
              trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
            }
          } else {
            // unlocked
            trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
          }
        } else {
          if (persistentTable.findSmallest(in_msg.addr) == machineID) {
            if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
                      TBEs[in_msg.addr]);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
                      TBEs[in_msg.addr]);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            // unlocked
            trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady(clockEdge())) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, directory_latency) { // FIXME?
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete.
    } else {
      // We'd like to issue a persistent request, but we are not allowed
      // to issue one right now. Thus, we do not increment the IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
    }
  }

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send the message if we don't already have all
      // the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since there is only one chip, assume all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete.
    } else {
      // We'd like to issue a persistent request, but we are not allowed
      // to issue one right now. Thus, we do not increment the IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since there is only one chip, assume all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) { // FIXME?
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

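  // The actions below talk to the memory controller by enqueueing a
  // MemoryMsg on memQueue_out (the requestToMemory buffer). This is the
  // explicit replacement for the old queueMemoryRead/queueMemoryWrite
  // SLICC calls that this change removes.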
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.Len := 0;
      }
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
      out_msg.addr := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := persistentTable.findSmallest(address);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.Len := 0;
    }
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.Len := 0;
      }
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := in_msg.Sender;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := 0;
      }
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
      out_msg.addr := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.Sender := tbe.DmaRequestor;
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Len := tbe.Len;
    }
  }

  action(lr_queueMemoryDmaReadWriteback, "lr",
         desc="Write DMA data from read to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := 0;
      }
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.WentPersistent := false;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    if (tbe.WentPersistent) {
      assert(starving);

      enqueue(persistentNetwork_out, PersistentMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache, l2_select_low_bit,
                                  l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
    unset_tbe();
  }

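  // Merge step for partial DMA writes: save the DMA's partial data,
  // adopt the full block from the incoming response, then overlay the
  // partial data at its offset so the eventual writeback contains both.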
  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      DataBlock DataBlk := tbe.DataBlk;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.DataBlk.copyPartial(DataBlk, getOffset(tbe.PhysicalAddress),
                              tbe.Len);
    }
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue(clockEdge());
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue(clockEdge());
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule the timeout") {
    //
    // Currently we only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // Currently we only support a fixed timeout latency
    //
    reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
  }

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // We send the entire data block and rely on the DMA controller
        // to split it up if need be.
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // We send the entire data block and rely on the DMA controller
        // to split it up if need be.
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS
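  // Each transition(state, event[, next_state]) block names the actions
  // executed, in order, when 'event' occurs in 'state'.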

  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA ack
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: the (NO_DW, Data_All_Tokens) transition is combined with O_DW above.
  // Note: NO_DW should not receive the event Ack_All_Tokens because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Ack_Owner_All_Tokens, Ack_Owner}, L_O_W) {
    f_incrementTokens;
    qp_queueMemoryForPersistent;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data -> O transition is confusing, but it can happen if a
  // persistent request is issued and resolves before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }

  //
  // If we receive a request timeout while waiting for memory, it is likely that
  // the request will be satisfied and issuing a persistent request will do us
  // no good. Just wait.
  //
  transition({O_DW_W, O_DR_W}, Request_Timeout) {
    rs_resetScheduleTimeout;
  }

  transition(NO_W, Lockdown, L_NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_W, Lockdown, L_O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DR_W, Lockdown, DR_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DW_W, Lockdown, DW_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }
}