/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */


machine(Directory, "Token protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int l2_select_num_bits,
   int directory_latency = 5,
   bool distributed_persistent = true,
   int fixed_timeout_latency = 100
{

  MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentFromDir, network="To", virtual_network="3", ordered="true", vnet_type="persistent";
  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false", vnet_type="request";

  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
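
  // Note: the persistent and DMA virtual networks are declared ordered
  // because the protocol relies on persistent (de)activation messages
  // being observed in order and on DMA requests completing in the order
  // the device issued them. The request and response networks may be
  // unordered: token counting keeps the protocol correct no matter how
  // those messages are reordered in flight.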

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, AccessPermission:Read_Only, desc="Owner, memory has valid data, but not necessarily all the tokens";
    NO, AccessPermission:Maybe_Stale, desc="Not Owner";
    L, AccessPermission:Busy, desc="Locked";

    // Memory wait states - can block all messages including persistent requests
    O_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory write";
    L_O_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, could eventually return to O";
    L_NO_W, AccessPermission:Busy, desc="transitioning to Locked, waiting for memory read, eventually return to NO";
    DR_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA read, waiting for memory data";
    DW_L_W, AccessPermission:Busy, desc="transitioning to Locked underneath a DMA write, waiting for memory ack";
    NO_W, AccessPermission:Busy, desc="transitioning to Not Owner, waiting for memory read";
    O_DW_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA ack";
    O_DR_W, AccessPermission:Busy, desc="transitioning to Owner, waiting for memory before DMA data";

    // DMA request transient states - must respond to persistent requests
    O_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DW, AccessPermission:Busy, desc="issued GETX for DMA write, waiting for all tokens";
    NO_DR, AccessPermission:Busy, desc="issued GETS for DMA read, waiting for data";

    // DMA request in progress - competing with a CPU persistent request
    DW_L, AccessPermission:Busy, desc="issued GETX for DMA write, CPU persistent request must complete first";
    DR_L, AccessPermission:Busy, desc="issued GETS for DMA read, CPU persistent request must complete first";
  }
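
  // Background on the token-counting rules these states assume: every
  // block has a fixed supply of max_tokens() tokens, one of which is the
  // owner token. Holding at least one token permits reading, holding all
  // of them permits writing, and the owner-token holder is responsible
  // for supplying data. setState() below asserts the matching directory
  // invariants, e.g. the L/DW_L/DR_L states hold zero tokens and the
  // owner states hold at least one.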

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Own_Lock_or_Unlock, desc="own lock or unlock";
    Own_Lock_or_Unlock_Tokens, desc="own lock or unlock with tokens";
    Data_Owner, desc="Data arrives";
    Data_All_Tokens, desc="Data and all tokens";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Ack_Owner_All_Tokens, desc="All tokens including owner arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
    Ack_All_Tokens, desc="All_Tokens arrive";
    Request_Timeout, desc="A DMA request has timed out";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    DMA_WRITE_All_Tokens, desc="A DMA Write memory request, directory has all tokens";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    bool okToIssueStarving(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
    DataBlock DataBlk, desc="The current view of system memory";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    bool WentPersistent, desc="Did the DMA request require a persistent request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  PersistentTable persistentTable;
  TimerTable reissueTimerTable;

  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  bool starving, default="false";
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }
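
  // Note that getDirectoryEntry() allocates on first touch, so callers
  // always get a valid entry back. A freshly allocated Entry carries the
  // declared defaults: state O and max_tokens() tokens, i.e. memory
  // initially owns every block and holds all of its tokens.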

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    getDirectoryEntry(addr).DirectoryState := state;

    if (state == State:L || state == State:DW_L || state == State:DR_L) {
      assert(getDirectoryEntry(addr).Tokens == 0);
    }

    // We have one or zero owners
    assert((getDirectoryEntry(addr).Owner.count() == 0) || (getDirectoryEntry(addr).Owner.count() == 1));

    // Make sure the token count is in range
    assert(getDirectoryEntry(addr).Tokens >= 0);
    assert(getDirectoryEntry(addr).Tokens <= max_tokens());

    if (state == State:O || state == State:O_W || state == State:O_DW) {
      assert(getDirectoryEntry(addr).Tokens >= 1); // Must have at least one token
      // assert(getDirectoryEntry(addr).Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }

  bool okToIssueStarving(Address addr, MachineID machinID) {
    return persistentTable.okToIssueStarving(addr, machinID);
  }
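
  // This wrapper is called with the directory's own machineID: when a
  // DMA request stalls, the directory itself escalates to a persistent
  // request (see px_/ps_ below) and competes in the persistent table
  // like any other requestor.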

  void markPersistentEntries(Address addr) {
    persistentTable.markEntries(addr);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(persistentNetwork_out, PersistentMsg, persistentFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //
  // Memory buffer for memory controller to DIMM communication
  //
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Reissue Timer
  in_port(reissueTimerTable_in, Address, reissueTimerTable) {
    if (reissueTimerTable_in.isReady()) {
      trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
              TBEs[reissueTimerTable.readyAddress()]);
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (getDirectoryEntry(in_msg.Address).Tokens + in_msg.Tokens == max_tokens()) {
          if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
              (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Data_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            trigger(Event:Ack_All_Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                     (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
            trigger(Event:Tokens, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
            trigger(Event:Ack_Owner, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Invalid message");
          }
        }
      }
    }
  }
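
  // The all-tokens cases above are detected purely by arithmetic: if the
  // directory holds N tokens and a response carries max_tokens() - N,
  // the sum reaches max_tokens() and the *_All_Tokens event fires. For
  // example (hypothetical counts), with max_tokens() = 32 and 30 tokens
  // already home, a DATA_OWNER message carrying 2 tokens triggers
  // Data_All_Tokens rather than Data_Owner.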

  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributed_persistent) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          if (persistentTable.isLocked(in_msg.Address)) {
            if (persistentTable.findSmallest(in_msg.Address) == machineID) {
              if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
                trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                        TBEs[in_msg.Address]);
              } else {
                trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                        TBEs[in_msg.Address]);
              }
            } else {
              // locked
              trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
            }
          } else {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          }
        } else {
          if (persistentTable.findSmallest(in_msg.Address) == machineID) {
            if (getDirectoryEntry(in_msg.Address).Tokens > 0) {
              trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.Address,
                      TBEs[in_msg.Address]);
            } else {
              trigger(Event:Own_Lock_or_Unlock, in_msg.Address,
                      TBEs[in_msg.Address]);
            }
          } else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            // locked
            trigger(Event:Lockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            // unlocked
            trigger(Event:Unlockdown, in_msg.Address, TBEs[in_msg.Address]);
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }
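
  // With distributed_persistent (the default), every node maintains its
  // own copy of the persistent table: the directory first applies each
  // lockdown/unlockdown message to its local table and then reacts to
  // the table's resulting state. Otherwise it triggers events from the
  // message type directly, without recording lock state locally.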

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
            trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          } else {
            trigger(Event:DMA_WRITE, in_msg.LineAddress,
                    TBEs[in_msg.LineAddress]);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendTokens, "a", desc="Send tokens to requestor") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
        enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(px_tryIssuingPersistentGETXRequest, "px", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETX_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }

  action(bw_broadcastWrite, "bw", desc="Broadcast GETX if we need tokens") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      //
      // Assert that we only send a message if we don't already have all
      // the tokens
      //
      assert(getDirectoryEntry(address).Tokens != max_tokens());
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(ps_tryIssuingPersistentGETSRequest, "ps", desc="...") {
    if (okToIssueStarving(address, machineID) && (starving == false)) {
      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:GETS_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
      markPersistentEntries(address);
      starving := true;

      tbe.WentPersistent := true;

      // Do not schedule a wakeup; a persistent request will always complete
    } else {

      // We'd like to issue a persistent request, but are not allowed
      // to issue a P.R. right now. Thus, we do not increment the
      // IssueCount.

      // Set a wakeup timer
      reissueTimerTable.set(address, 10);
    }
  }

  action(br_broadcastRead, "br", desc="Broadcast GETS for data") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(requestNetwork_out, RequestMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;

        //
        // Since only one chip, assuming all L1 caches are local
        //
        out_msg.Destination.broadcast(MachineType:L1Cache);
        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.RetryNum := 0;
        out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        out_msg.Prefetch := PrefetchBit:No;
        out_msg.AccessMode := RubyAccessMode:Supervisor;
      }
    }
  }

  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (getDirectoryEntry(address).Tokens > 0) {
      // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {// FIXME?
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      getDirectoryEntry(address).Tokens := 0;
    }
  }

  action(d_sendMemoryDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(in_msg.Address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(dd_sendMemDataToStarver, "\d", desc="Send data and tokens to starver") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(getDirectoryEntry(address).Tokens > 0);
        out_msg.Tokens := getDirectoryEntry(address).Tokens;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(de_sendTbeDataToStarver, "de", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(getDirectoryEntry(address).Tokens > 0);
      out_msg.Tokens := getDirectoryEntry(address).Tokens;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    getDirectoryEntry(address).Tokens := 0;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
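
  // A sketch of the merge above with hypothetical values, assuming a
  // 64-byte block: for a DMA write of Len = 8 to physical address
  // 0x1010, addressOffset(0x1010) = 16, so copyPartial() overwrites
  // bytes [16, 24) of the outgoing block with the DMA data while the
  // other 56 bytes keep the memory image captured in tbe.DataBlk.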

  action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaDataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.WentPersistent := false;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {

    if (tbe.WentPersistent) {
      assert(starving == true);

      enqueue(persistentNetwork_out, PersistentMsg, latency = "1") {
        out_msg.Address := address;
        out_msg.Type := PersistentRequestType:DEACTIVATE_PERSISTENT;
        out_msg.Requestor := machineID;
        out_msg.Destination.broadcast(MachineType:L1Cache);

        //
        // Currently the configuration system limits the system to only one
        // chip. Therefore, if we assume one shared L2 cache, then only one
        // pertinent L2 cache exists.
        //
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));

        out_msg.Destination.add(mapAddressToRange(address,
                                                  MachineType:L2Cache,
                                                  l2_select_low_bit,
                                                  l2_select_num_bits));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Persistent_Control;
      }
      starving := false;
    }

    TBEs.deallocate(address);
    unset_tbe();
  }

  action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(cd_writeCleanDataToTbe, "cd", desc="Write clean memory data to TBE") {
    tbe.DataBlk := getDirectoryEntry(address).DataBlk;
  }

  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }
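
  // This performs the same merge as ld_queueMemoryDmaWriteFromTbe above,
  // but applies it to the directory's copy of the block rather than to
  // an outgoing memory message, keeping the in-memory image and the
  // written-back image consistent.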

  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1);
      getDirectoryEntry(address).Tokens := getDirectoryEntry(address).Tokens + in_msg.Tokens;
    }
  }

  action(aat_assertAllTokens, "aat", desc="assert that we have all tokens") {
    assert(getDirectoryEntry(address).Tokens == max_tokens());
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(z_recycleRequest, "z", desc="Recycle the request queue") {
    requestNetwork_in.recycle();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
    responseNetwork_in.recycle();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(n_checkData, "n", desc="Check incoming clean data message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(rs_resetScheduleTimeout, "rs", desc="Reschedule Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
      reissueTimerTable.set(address, fixed_timeout_latency);
    }
  }

  action(st_scheduleTimeout, "st", desc="Schedule Timeout") {
    //
    // currently only support a fixed timeout latency
    //
    reissueTimerTable.set(address, fixed_timeout_latency);
  }

  action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
    if (reissueTimerTable.isSet(address)) {
      reissueTimerTable.unset(address);
    }
  }

  action(bd_bounceDatalessOwnerToken, "bd", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dm_sendMemoryDataToDma, "dm", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dd_sendDmaData, "dd", desc="Send Data to DMA controller") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // TRANSITIONS

  //
  // Trans. from base state O
  // the directory has valid data
  //
  transition(O, GETX, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_WRITE, O_DW) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, DMA_WRITE_All_Tokens, O_DW_W) {
    vd_allocateDmaRequestInTBE;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    p_popDmaRequestQueue;
  }
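
  //
  // DMA writes from O take one of the two paths above: with every token
  // already home (DMA_WRITE_All_Tokens) the DMA data can be merged and
  // queued to memory immediately; otherwise (DMA_WRITE) the directory
  // must first broadcast a GETX and collect the outstanding tokens,
  // with a timeout that escalates to a persistent request if needed.
  //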

  transition(O, GETS, NO_W) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_W) {
    vd_allocateDmaRequestInTBE;
    fd_memoryDma;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(O, Lockdown, L_O_W) {
    qp_queueMemoryForPersistent;
    l_popIncomingPersistentQueue;
  }

  transition(O, {Tokens, Ack_All_Tokens}) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O, {Data_Owner, Data_All_Tokens}) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition({O, NO}, Unlockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // transitioning to Owner, waiting for memory before DMA data
  // All other events should recycle/stall
  //
  transition(O_DR_W, Memory_Data, O) {
    dm_sendMemoryDataToDma;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    l_popMemQueue;
  }

  //
  // issued GETX for DMA write, waiting for all tokens
  //
  transition(O_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(O_DW, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Data_Owner) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Lockdown, DW_L) {
    de_sendTbeDataToStarver;
    l_popIncomingPersistentQueue;
  }

  transition({NO_DW, O_DW}, Data_All_Tokens, O_DW_W) {
    f_incrementTokens;
    rd_recordDataInTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_All_Tokens, O_DW_W) {
    f_incrementTokens;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW, Ack_Owner_All_Tokens, O_DW_W) {
    f_incrementTokens;
    cd_writeCleanDataToTbe;
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWriteFromTbe;
    ut_unsetReissueTimer;
    k_popIncomingResponseQueue;
  }

  transition(O_DW_W, Memory_Ack, O) {
    da_sendDmaAck;
    s_deallocateTBE;
    l_popMemQueue;
  }
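
  //
  // End of the DMA write sequence: all tokens are home and the merged
  // line has been queued to memory (O_DW_W); once memory acks, the
  // directory acks the DMA controller and settles back in O.
  //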

  //
  // Trans. from NO
  // The directory does not have valid data, but may have some tokens
  //
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_WRITE, NO_DW) {
    vd_allocateDmaRequestInTBE;
    bw_broadcastWrite;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, GETS) {
    j_popIncomingRequestQueue;
  }

  transition(NO, DMA_READ, NO_DR) {
    vd_allocateDmaRequestInTBE;
    br_broadcastRead;
    st_scheduleTimeout;
    p_popDmaRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    lq_queueMemoryWbRequest;
    k_popIncomingResponseQueue;
  }

  transition(NO, {Ack_Owner, Ack_Owner_All_Tokens}, O) {
    n_checkData;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    d_sendMemoryDataWithAllTokens;
    l_popMemQueue;
  }

  // Trans. from NO_DW
  transition(NO_DW, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(NO_DW, Lockdown, DW_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  // Note: the NO_DW, Data_All_Tokens transition is combined with O_DW above.
  // Note: NO_DW should never see the Ack_All_Tokens event because the
  // directory does not have valid data

  transition(NO_DW, Data_Owner, O_DW) {
    f_incrementTokens;
    rd_recordDataInTbe;
    k_popIncomingResponseQueue;
  }

  transition({NO_DW, NO_DR}, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO_DR
  transition(NO_DR, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  transition(NO_DR, Lockdown, DR_L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO_DR, {Data_Owner, Data_All_Tokens}, O_W) {
    m_writeDataToMemory;
    f_incrementTokens;
    dd_sendDmaData;
    lr_queueMemoryDmaReadWriteback;
    ut_unsetReissueTimer;
    s_deallocateTBE;
    k_popIncomingResponseQueue;
  }
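
  //
  // End of the DMA read sequence under NO: the owner's data response
  // both satisfies the DMA read and is written back to memory, so the
  // directory ends up owning a clean copy (O_W until the memory ack
  // returns).
  //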

  // Trans. from L
  transition({L, DW_L, DR_L}, {GETX, GETS}) {
    j_popIncomingRequestQueue;
  }

  transition({L, DW_L, DR_L, L_O_W, L_NO_W, DR_L_W, DW_L_W}, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  //
  // Received data for lockdown blocks
  // For blocks with outstanding dma requests to them
  // ...we could change this to write the data to memory and send it cleanly
  // ...we could also proactively complete our DMA requests
  // However, to keep my mind from spinning out-of-control, we won't for now :)
  //
  transition({DW_L, DR_L, L}, {Data_Owner, Data_All_Tokens}) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition({DW_L, DR_L, L}, {Ack_Owner_All_Tokens, Ack_Owner}) {
    bd_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }

  transition(L, {Unlockdown, Own_Lock_or_Unlock}, NO) {
    l_popIncomingPersistentQueue;
  }

  transition(L, Own_Lock_or_Unlock_Tokens, O) {
    l_popIncomingPersistentQueue;
  }

  transition({L_NO_W, L_O_W}, Memory_Data, L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(L_O_W, Memory_Ack) {
    qp_queueMemoryForPersistent;
    l_popMemQueue;
  }

  transition(L_O_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(L_NO_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, Memory_Data, DR_L) {
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L_W, Memory_Ack, L) {
    aat_assertAllTokens;
    da_sendDmaAck;
    s_deallocateTBE;
    dd_sendMemDataToStarver;
    l_popMemQueue;
  }

  transition(DW_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DW) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DR_W) {
    l_popIncomingPersistentQueue;
  }

  transition(DW_L_W, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, O_DW_W) {
    l_popIncomingPersistentQueue;
  }

  transition({DW_L, DR_L_W, DW_L_W}, Request_Timeout) {
    ut_unsetReissueTimer;
    px_tryIssuingPersistentGETXRequest;
  }

  transition(DR_L, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}, NO_DR) {
    l_popIncomingPersistentQueue;
  }

  transition(DR_L, Request_Timeout) {
    ut_unsetReissueTimer;
    ps_tryIssuingPersistentGETSRequest;
  }

  //
  // The O_W + Memory_Data -> O transition is confusing, but it can happen if a
  // persistent request is issued and resolved before memory returns with data
  //
  transition(O_W, {Memory_Ack, Memory_Data}, O) {
    l_popMemQueue;
  }

  transition({O, NO}, {Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }

  // Blocked states
  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR}, {GETX, GETS}) {
    z_recycleRequest;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W, O_DW, NO_DW, NO_DR, L, DW_L, DR_L}, {DMA_READ, DMA_WRITE, DMA_WRITE_All_Tokens}) {
    y_recycleDmaRequestQueue;
  }

  transition({NO_W, O_W, L_O_W, L_NO_W, DR_L_W, DW_L_W, O_DW_W, O_DR_W}, {Data_Owner, Ack_Owner, Tokens, Data_All_Tokens, Ack_All_Tokens}) {
    kz_recycleResponse;
  }

  //
  // If we receive a request timeout while waiting for memory, it is likely that
  // the request will be satisfied and issuing a persistent request will do us
  // no good. Just wait.
  //
  transition({O_DW_W, O_DR_W}, Request_Timeout) {
    rs_resetScheduleTimeout;
  }

  transition(NO_W, Lockdown, L_NO_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_W, Lockdown, L_O_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DR_W, Lockdown, DR_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition(O_DW_W, Lockdown, DW_L_W) {
    l_popIncomingPersistentQueue;
  }

  transition({NO_W, O_W, O_DR_W, O_DW_W, O_DW, NO_DR, NO_DW}, {Unlockdown, Own_Lock_or_Unlock, Own_Lock_or_Unlock_Tokens}) {
    l_popIncomingPersistentQueue;
  }
}