
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */


machine(Directory, "Token protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int l2_select_num_bits,
   int directory_latency = 5,
   bool distributed_persistent = true,
   int fixed_timeout_latency = 100
{

  MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";

  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
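
  // Assumed rationale for the ordering flags above: persistent
  // lockdown/deactivate traffic (vnet 3) must be observed in a consistent
  // order at every node so each node's persistent table converges on the
  // same winner, while the DMA request/response networks (vnets 0 and 5)
  // preserve a device's issue order.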

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
    NO, AccessPermission:Maybe_Stale, desc="Not Owner";
    L, AccessPermission:Busy, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
    L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";

  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All_Tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }
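
  // The Owner/Sharers filter above is only a performance hint: a request
  // could be forwarded to just the probable owner instead of broadcast, and
  // if the hint is stale the requester eventually falls back to broadcast
  // (and, if need be, to a persistent request), so correctness never depends
  // on this soft state being accurate.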

  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    bool okToIssueStarving(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
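
  // Arbitration sketch (hypothetical IDs): if both L1Cache-1 and L1Cache-4
  // hold persistent requests for a block, findSmallest() names L1Cache-1 at
  // every node, so all nodes deterministically serve the same starver first;
  // this common ordering is what makes the lockdown mechanism
  // starvation-free.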

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
    DataBlock DataBlk, desc="The current view of system memory";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template_hack="<Directory_TBE>";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

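  // Illustrative arithmetic (hypothetical sizes): with 64-byte blocks,
  // l2_select_low_bit == RubySystem::getBlockSizeBits() == 6, so with
  // l2_select_num_bits == 2 the mapAddressToRange() calls below use address
  // bits [7:6] to select one of four L2 banks.
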
  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }
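
  // Worked example of the invariants above (illustrative): the tokens for a
  // block always sum to max_tokens() across the whole system. When the
  // directory holds every one of them, a DMA write can be satisfied locally
  // (the DMA_WRITE_All_Tokens event); in the owner states O/O_W/O_DW the
  // directory must hold at least the owner token, hence the Tokens >= 1
  // assertion.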

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }

  bool okToIssueStarving(Address addr, MachineID machineID) {
    return persistentTable.okToIssueStarving(addr, machineID);
  }

  void markPersistentEntries(Address addr) {
    persistentTable.markEntries(addr);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //
  // Memory buffer for memory controller to DIMM communication
  //
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Reissue Timer
  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
    if (reissueTimerTable_in.isReady()) {
      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
              TBEs[reissueTimerTable.readyAddress()]);
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.Address)) {
            if (persistentTable.findSmallest(in_msg.Address) == machineID) {
              if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                        TBEs[in_msg.Address]);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                        TBEs[in_msg.Address]);
              }
            } else {
              // locked
              trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
            }
          } else {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          }
        } else {
          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
            if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                      TBEs[in_msg.Address]);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                      TBEs[in_msg.Address]);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
        enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache,
                                  l2_select_low_bit,
                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }
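
  // Lifecycle sketch of a persistent DMA request: once issued, the
  // persistent table entries are marked, starving is set, and
  // tbe.WentPersistent records that s_deallocateTBE must later broadcast
  // DEACTIVATE_PERSISTENT to release the lockdown. If issue is not yet
  // allowed, the 10-cycle timer above simply retries this action.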

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send a message if we don't already have all the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache,
                                  l2_select_low_bit,
                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache,
                                  l2_select_low_bit,
                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache,
                                  l2_select_low_bit,
                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
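
  // Illustrative copyPartial() merge (hypothetical values): for a 64-byte
  // block, a DMA write with addressOffset(tbe.PhysicalAddress) == 16 and
  // tbe.Len == 8 overwrites only bytes [16, 24) of the block image copied
  // from tbe.DataBlk, leaving the other 56 bytes as previously read from
  // memory, so a sub-block DMA write never clobbers neighboring data.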

  action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaDataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.WentPersistent := false;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {

    if (tbe.WentPersistent) {
      assert(starving == true);

      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                  MachineType:L2Cache,
                                  l2_select_low_bit,
                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
    unset_tbe();
  }

  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
    tbe.DataBlk := getDirectoryEntry(address).DataBlk;
  }

  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(n_checkData, "n", desc="Check incoming clean data message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, fixed_timeout_latency);
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    reissueTimerTable.set(address, fixed_timeout_latency);
  }
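
  // Timeout flow (illustrative): a DMA request that has not collected the
  // tokens or data it needs within fixed_timeout_latency (default 100)
  // cycles fires Request_Timeout, which escalates to a persistent GETX/GETS
  // via px/ps above; while waiting on memory instead (O_DW_W/O_DR_W), the
  // timeout is merely rescheduled via rs_resetScheduleTimeout.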

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS

  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA ack
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: NO_DW, Data_All_Tokens transition is combined with O_DW
  // Note: NO_DW should not receive the event Ack_All_Tokens because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data > O transition is confusing, but it can happen if a
  // persistent request is issued and resolved before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }

  //
  // If we receive a request timeout while waiting for memory, it is likely that
  // the request will be satisfied and issuing a persistent request will do us
  // no good. Just wait.
  //
  transition({O_DW_W, O_DR_W}, Request_Timeout) {
    rs_resetScheduleTimeout;
  }

  transition(NO_W, Lockdown, L_NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_W, Lockdown, L_O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DR_W, Lockdown, DR_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DW_W, Lockdown, DW_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }
}