
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */


machine(Directory, "Token protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int l2_select_num_bits,
   Cycles directory_latency = 5,
   bool distributed_persistent = true,
   Cycles fixed_timeout_latency = 100,
   Cycles reissue_wakeup_latency = 10
{

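  //
  // A brief summary of the parameters above, inferred from their use in
  // this file: directory_latency is the enqueue latency for token/data
  // responses sent by the directory; distributed_persistent selects
  // whether this controller maintains its own copy of the persistent
  // request table (applying every lockdown/unlockdown message locally)
  // rather than reacting to the message types alone; fixed_timeout_latency
  // is the reissue timeout for outstanding DMA requests; and
  // reissue_wakeup_latency is the retry delay used when a persistent
  // request cannot be issued yet.
  //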
  MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";

  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";

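  //
  // Note that the persistent virtual network (vnet 3) and the DMA vnets
  // are declared ordered while plain request/response traffic is not,
  // presumably because persistent activate/deactivate messages for the
  // same address must be observed by every persistent table in issue
  // order.
  //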
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
    NO, AccessPermission:Maybe_Stale, desc="Not Owner";
    L, AccessPermission:Busy, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
    L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    bool okToIssueStarving(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
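
  //
  // How the persistent table is used here: a persistent (starvation
  // avoidance) request locks an address in the table, and findSmallest()
  // returns the highest-priority starving node for that address
  // (priority appears to be lowest machine ID, per the name). While an
  // address is locked, the directory forwards its tokens and data to
  // whichever node findSmallest() returns.
  //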

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
    DataBlock DataBlk, desc="The current view of system memory";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }
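
  //
  // Note that getDirectoryEntry() allocates an entry on first touch.
  // Given the defaults above (state O, Tokens = max_tokens()), a block
  // that no cache has ever requested is owned by memory and holds all
  // of its tokens.
  //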

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }

  bool okToIssueStarving(Address addr, MachineID machID) {
    return persistentTable.okToIssueStarving(addr, machID);
  }

  void markPersistentEntries(Address addr) {
    persistentTable.markEntries(addr);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //
  // Memory buffer for memory controller to DIMM communication
  //
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Reissue Timer
  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
    if (reissueTimerTable_in.isReady()) {
      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
              TBEs[reissueTimerTable.readyAddress()]);
    }
  }

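  //
  // Incoming responses are classified by token count: when the tokens
  // carried in the message plus those already held reach max_tokens(),
  // the *_All_Tokens flavor of the event fires. This is what lets DMA
  // writes and clean-owner writebacks below complete.
  //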
  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.Address)) {
            if (persistentTable.findSmallest(in_msg.Address) == machineID) {
              if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                        TBEs[in_msg.Address]);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                        TBEs[in_msg.Address]);
              }
            } else {
              // locked
              trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
            }
          } else {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          }
        }
        else {
          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
            if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                      TBEs[in_msg.Address]);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                      TBEs[in_msg.Address]);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }
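
  //
  // In the distributed_persistent case above, every controller applies
  // each lockdown/unlockdown to its own copy of the persistent table and
  // then reacts to the table's resulting state; in the centralized case
  // the directory reacts to the message type directly and only consults
  // the table to identify the current starver.
  //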

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
        enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, reissue_wakeup_latency);
    }
  }

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send a message if we don't already have all
      // the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since there is only one chip, assume all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, reissue_wakeup_latency);
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since there is only one chip, assume all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
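
  //
  // The merge in ld_queueMemoryDmaWriteFromTbe handles partial-line DMA
  // writes: the outgoing block is first seeded with the saved memory
  // image (tbe.DataBlk), then copyPartial() overlays only the DMA bytes,
  // starting at the DMA address's offset within the line, for tbe.Len
  // bytes.
  //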

  action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaDataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.WentPersistent := false;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {

    if (tbe.WentPersistent) {
      assert(starving == true);

      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
    unset_tbe();
  }
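
  //
  // The deactivate broadcast in s_deallocateTBE mirrors the destination
  // set used when the persistent request was activated (all L1s, the
  // pertinent L2 bank, and this directory), so that every copy of the
  // persistent table observes the unlock.
  //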

  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
    tbe.DataBlk := getDirectoryEntry(address).DataBlk;
  }

  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(n_checkData, "n", desc="Check incoming clean data message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, fixed_timeout_latency);
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    reissueTimerTable.set(address, fixed_timeout_latency);
  }

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }
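
  //
  // Timeout mechanics, in brief: st_scheduleTimeout arms the reissue
  // timer when a DMA transient state is entered, the reissueTimerTable
  // in_port turns expiry into a Request_Timeout event,
  // rs_resetScheduleTimeout re-arms the timer while we are safely
  // waiting on memory, and ut_unsetReissueTimer cancels it when the
  // request completes or escalates to a persistent request.
  //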

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS

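  //
  // A worked example of the common DMA write flow, read off the
  // transitions below: in state O, DMA_WRITE allocates a TBE, saves the
  // clean memory data, broadcasts a GETX, schedules a timeout, and moves
  // to O_DW. Token and data responses then accumulate until an
  // *_All_Tokens event moves us to O_DW_W with the merged line queued to
  // memory; Memory_Ack finally sends the DMA ack and returns to O.
  //
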
  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA ack
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: the NO_DW, Data_All_Tokens transition is combined with O_DW
  // Note: NO_DW should not receive the event Ack_All_Tokens because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data -> O transition is confusing, but it can happen if a
  // persistent request is issued and resolved before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }

  //
  // If we receive a request timeout while waiting for memory, it is likely that
  // the request will be satisfied and issuing a persistent request will do us
  // no good. Just wait.
  //
  transition({O_DW_W, O_DR_W}, Request_Timeout) {
    rs_resetScheduleTimeout;
  }

  transition(NO_W, Lockdown, L_NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_W, Lockdown, L_O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DR_W, Lockdown, DR_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DW_W, Lockdown, DW_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }
}