/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "Token protocol")
 : DirectoryMemory * directory;
   int l2_select_num_bits;
   Cycles directory_latency := 5;
   bool distributed_persistent := "True";
   Cycles fixed_timeout_latency := 100;
   Cycles reissue_wakeup_latency := 10;
   Cycles to_memory_controller_latency := 1;

   // Message Queues from dir to other controllers / network
   MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
        vnet_type="response";

   MessageBuffer * responseFromDir, network="To", virtual_network="4",
        vnet_type="response";

   MessageBuffer * persistentFromDir, network="To", virtual_network="3",
        vnet_type="persistent";

   MessageBuffer * requestFromDir, network="To", virtual_network="1",
        vnet_type="request";

   // Message Queues to dir from other controllers / network
   MessageBuffer * responseToDir, network="From", virtual_network="4",
        vnet_type="response";

   MessageBuffer * persistentToDir, network="From", virtual_network="3",
        vnet_type="persistent";

   MessageBuffer * requestToDir, network="From", virtual_network="2",
        vnet_type="request";

   MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
        vnet_type="request";

   MessageBuffer * responseFromMemory;
{
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
    NO, AccessPermission:Maybe_Stale, desc="Not Owner";
    L, AccessPermission:Busy, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
    L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All_Tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

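  // Interface to the persistent request table, which implements the token
  // protocol's starvation-avoidance mechanism. Among the nodes with an
  // active persistent request for an address, findSmallest() selects the
  // fixed-priority winner (the requestor with the smallest machine ID),
  // which must eventually receive the data and tokens.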
  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Addr, MachineID, AccessType);
    void persistentRequestUnlock(Addr, MachineID);
    bool okToIssueStarving(Addr, MachineID);
    MachineID findSmallest(Addr);
    AccessType typeOfSmallest(Addr);
    void markEntries(Addr);
    bool isLocked(Addr);
    int countStarvingForAddress(Addr);
    int countReadStarvingForAddress(Addr);
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Addr PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Current view of the associated address range";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

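  // Set while this directory has an outstanding persistent request of its
  // own, issued on behalf of a DMA operation (see the px/ps actions);
  // cleared when the request is deactivated in s_deallocateTBE.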
  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Tick clockEdge();
  Tick clockEdge(Cycles c);
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

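  // Directory entries are allocated on demand. A newly allocated entry
  // starts with the default token count of max_tokens(), i.e., memory
  // initially holds all tokens for the line.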
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }

  bool okToIssueStarving(Addr addr, MachineID machID) {
    return persistentTable.okToIssueStarving(addr, machID);
  }

  void markPersistentEntries(Addr addr) {
    persistentTable.markEntries(addr);
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  // ** IN_PORTS **
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Reissue Timer
  in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
    Tick current_time := clockEdge();
    if (reissueTimerTable_in.isReady(current_time)) {
      Addr addr := reissueTimerTable.nextAddress();
      trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
    }
  }

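  // Incoming responses. If the tokens carried by the message plus the
  // tokens already held by the directory add up to max_tokens(), the
  // directory is about to hold every token for the line, so the
  // "All_Tokens" variants of the events are triggered.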
  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }

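  // Incoming persistent (starvation) requests. With distributed_persistent
  // enabled, every controller keeps its own copy of the persistent table:
  // lockdown/unlockdown messages are first applied to the local table and
  // the trigger is derived from the table's resulting state. Otherwise,
  // the message type is used directly.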
  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady(clockEdge())) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.addr)) {
            if (persistentTable.findSmallest(in_msg.addr) == machineID) {
              if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
                        TBEs[in_msg.addr]);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
                        TBEs[in_msg.addr]);
              }
            } else {
              // locked
              trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
            }
          } else {
            // unlocked
            trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
          }
        } else {
          if (persistentTable.findSmallest(in_msg.addr) == machineID) {
            if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
                      TBEs[in_msg.addr]);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
                      TBEs[in_msg.addr]);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            // unlocked
            trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady(clockEdge())) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                MachineType:L2Cache, l2_select_low_bit,
                l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
    }
  }

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send a message if we don't already have all the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                MachineType:L2Cache, l2_select_low_bit,
                l2_select_num_bits, intToID(0)));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                MachineType:L2Cache, l2_select_low_bit,
                l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, clockEdge(reissue_wakeup_latency));
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                MachineType:L2Cache, l2_select_low_bit,
                l2_select_num_bits, intToID(0)));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {// FIXME?
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.addr).Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    queueMemoryRead(persistentTable.findSmallest(address), address,
                    to_memory_controller_latency);
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    queueMemoryWritePartial(tbe.DmaRequestor, address,
                            to_memory_controller_latency, tbe.DataBlk,
                            tbe.Len);
  }

  action(lr_queueMemoryDmaReadWriteback, "lr",
         desc="Write DMA data from read to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      queueMemoryWrite(machineID, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.WentPersistent := false;
    }
  }

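  // Note: besides freeing the TBE, this action deactivates any persistent
  // request that was issued on behalf of the completed DMA operation, so
  // the rest of the system stops treating this directory as starving.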
  action(s_deallocateTBE, "s", desc="Deallocate TBE") {

    if (tbe.WentPersistent) {
      assert(starving);

      enqueue(persistentNetwork_out, PersistentMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
722 //
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                MachineType:L2Cache, l2_select_low_bit,
                l2_select_num_bits, intToID(0)));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
    unset_tbe();
  }

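  // Merge the partial DMA write data with the full data block that just
  // arrived: the incoming block is copied into the TBE first, then the
  // buffered DMA bytes are re-applied on top at the correct offset.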
  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      DataBlock DataBlk := tbe.DataBlk;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.DataBlk.copyPartial(DataBlk, getOffset(tbe.PhysicalAddress),
                              tbe.Len);
    }
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue(clockEdge());
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue(clockEdge());
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    reissueTimerTable.set(address, clockEdge(fixed_timeout_latency));
  }

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS

  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA ack
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: NO_DW, Data_All_Tokens transition is combined with O_DW
  // Note: NO_DW should not receive the Ack_All_Tokens event because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Ack_Owner_All_Tokens, Ack_Owner}, L_O_W) {
    f_incrementTokens;
    qp_queueMemoryForPersistent;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data -> O transition is confusing, but it can happen if a
  // persistent request is issued and resolved before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }

  //
  // If we receive a request timeout while waiting for memory, it is likely that
  // the request will be satisfied and issuing a persistent request will do us
  // no good. Just wait.
  //
  transition({O_DW_W, O_DR_W}, Request_Timeout) {
    rs_resetScheduleTimeout;
  }

  transition(NO_W, Lockdown, L_NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_W, Lockdown, L_O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DR_W, Lockdown, DR_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DW_W, Lockdown, DW_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }
}