Ruby: Remove CacheMsg class from SLICC
[gem5.git] / src / mem / protocol / MOESI_hammer-dir.sm
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory,
38 CacheMemory * probeFilter,
39 MemoryControl * memBuffer,
40 int memory_controller_latency = 2,
41 bool probe_filter_enabled = false,
42 bool full_bit_dir_enabled = false
43 {
44
45 MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false";
46 MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
47 //
48 // For a finite buffered network, note that the DMA response network only
49 // works at this relatively lower numbered (lower priority) virtual network
50 // because the trigger queue decouples cache responses from DMA responses.
51 //
52 MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true";
53
54 MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
55 MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
56 MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
57 MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
58
59 // STATES
60 state_declaration(State, desc="Directory states", default="Directory_State_E") {
// Naming key (per the desc strings below): N*/NO/NX = directory is Not Owner,
// O = Owner, S = Shared, E = Exclusive; suffixes: _B = blocked on a request,
// _W = waiting for DRAM, _R = replacing a probe filter entry, _DR/_DW = DMA
// read/write in flight.
61 // Base states
62 NX, AccessPermission:Invalid, desc="Not Owner, probe filter entry exists, block in O at Owner";
63 NO, AccessPermission:Invalid, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
64 S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
65 O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
66 E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
67
68 O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
69 S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
70 NO_R, AccessPermission:Invalid, desc="Was Not Owner or Sharer, replacing probe filter entry";
71
// Transient states; several share the short name "NO^B" for trace output.
72 NO_B, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked";
73 NO_B_X, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
74 NO_B_S, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
75 NO_B_S_W, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
76 O_B, AccessPermission:Invalid, "O^B", desc="Owner, Blocked";
77 NO_B_W, AccessPermission:Invalid, desc="Not Owner, Blocked, waiting for Dram";
78 O_B_W, AccessPermission:Invalid, desc="Owner, Blocked, waiting for Dram";
79 NO_W, AccessPermission:Invalid, desc="Not Owner, waiting for Dram";
80 O_W, AccessPermission:Invalid, desc="Owner, waiting for Dram";
81 NO_DW_B_W, AccessPermission:Invalid, desc="Not Owner, Dma Write waiting for Dram and cache responses";
82 NO_DR_B_W, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for Dram and cache responses";
83 NO_DR_B_D, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
84 NO_DR_B, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for cache responses";
85 NO_DW_W, AccessPermission:Invalid, desc="Not Owner, Dma Write waiting for Dram";
86 O_DR_B_W, AccessPermission:Invalid, desc="Owner, Dma Read waiting for Dram and cache responses";
87 O_DR_B, AccessPermission:Invalid, desc="Owner, Dma Read waiting for cache responses";
88 WB, AccessPermission:Invalid, desc="Blocked on a writeback";
89 WB_O_W, AccessPermission:Invalid, desc="Blocked on memory write, will go to O";
90 WB_E_W, AccessPermission:Invalid, desc="Blocked on memory write, will go to E";
91 }
92
93 // Events
// Events driving the directory state machine. Fixes copy-pasted desc strings:
// UnblockS/UnblockM previously shared Unblock's desc, and
// All_acks_and_owner_data duplicated the shared-data desc.
94 enumeration(Event, desc="Directory events") {
95 GETX, desc="A GETX arrives";
96 GETS, desc="A GETS arrives";
97 PUT, desc="A PUT arrives";
98 Unblock, desc="An unblock message arrives";
99 UnblockS, desc="An unblockS message arrives";
100 UnblockM, desc="An unblockM message arrives";
101 Writeback_Clean, desc="The final part of a PutX (no data)";
102 Writeback_Dirty, desc="The final part of a PutX (data)";
103 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
104 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
105
106 // Probe filter
107 Pf_Replacement, desc="probe filter replacement";
108
109 // DMA requests
110 DMA_READ, desc="A DMA Read memory request";
111 DMA_WRITE, desc="A DMA Write memory request";
112
113 // Memory Controller
114 Memory_Data, desc="Fetched data from memory arrives";
115 Memory_Ack, desc="Writeback Ack from memory arrives";
116
117 // Cache responses required to handle DMA
118 Ack, desc="Received an ack message";
119 Shared_Ack, desc="Received an ack message, responder has a shared copy";
120 Shared_Data, desc="Received a data message, responder has a shared copy";
121 Data, desc="Received a data message, responder had a owner or exclusive copy, they gave it to us";
122 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
123
124 // Triggers (internal, raised via triggerQueue)
125 All_acks_and_shared_data, desc="Received shared data and message acks";
126 All_acks_and_owner_data, desc="Received owner data and message acks";
127 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
128 All_Unblocks, desc="Received all unblocks for a merged gets request";
129 }
130
131 // TYPES
132
133 // DirectoryEntry
// Per-block directory entry stored in DirectoryMemory (AbstractEntry).
134 structure(Entry, desc="...", interface="AbstractEntry") {
135 State DirectoryState, desc="Directory state";
136 DataBlock DataBlk, desc="data for the block";
137 }
138
139 // ProbeFilterEntry
// Probe filter entry (AbstractCacheEntry) tracked in the probeFilter
// CacheMemory; Sharers is only meaningful when full_bit_dir_enabled.
140 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
141 State PfState, desc="Directory state";
142 MachineID Owner, desc="Owner node";
143 DataBlock DataBlk, desc="data for the block";
144 Set Sharers, desc="sharing vector for full bit directory";
145 }
146
147 // TBE entries for DMA requests
// Transaction Buffer Entry for in-flight directory transactions.
// Fix: the desc for Owned was copy-pasted from Sharers ("currently a
// sharer"); so_setOwnerBit sets Owned when an owner response is seen.
148 structure(TBE, desc="TBE entries for outstanding DMA requests") {
149 Address PhysicalAddress, desc="physical address";
150 State TBEState, desc="Transient State";
151 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
152 int Acks, default="0", desc="The number of acks that the waiting response represents";
153 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
154 DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to merged with system memory";
155 DataBlock DataBlk, desc="The current view of system memory";
156 int Len, desc="...";
157 MachineID DmaRequestor, desc="DMA requestor";
158 NetDest GetSRequestors, desc="GETS merged requestors";
159 int NumPendingMsgs, desc="Number of pending acks/messages";
160 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
161 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
162 bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
163 }
164
// Externally implemented TBE table interface (keyed by block address).
165 structure(TBETable, external="yes") {
166 TBE lookup(Address);
167 void allocate(Address);
168 void deallocate(Address);
169 bool isPresent(Address);
170 }
171
// Helpers provided by the SLICC runtime for binding the current
// cache_entry / tbe used implicitly inside actions.
172 void set_cache_entry(AbstractCacheEntry b);
173 void unset_cache_entry();
174 void set_tbe(TBE a);
175 void unset_tbe();
176 void wakeUpBuffers(Address a);
177
178 // ** OBJECTS **
179
// Scratch sharer set reused by actions that compute forward destinations.
180 Set fwd_set;
181
182 TBETable TBEs, template_hack="<Directory_TBE>";
183
// Look up the directory entry for addr (always present in DirectoryMemory).
184 Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
185 return static_cast(Entry, directory[addr]);
186 }
187
// Look up the probe filter entry for addr; returns OOD (null pointer) when
// neither probe filter mode is enabled. lookup() may itself return null if
// the tag is not present.
188 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
189 if (probe_filter_enabled || full_bit_dir_enabled) {
190 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
191 return pfEntry;
192 }
193 return OOD;
194 }
195
// Current state for addr: the TBE's transient state wins if a TBE exists;
// otherwise the stable directory state (sanity-checked against the probe
// filter entry's mirrored state when one is present).
196 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
197 if (is_valid(tbe)) {
198 return tbe.TBEState;
199 } else {
200 if (probe_filter_enabled || full_bit_dir_enabled) {
201 if (is_valid(pf_entry)) {
202 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
203 }
204 }
205 return getDirectoryEntry(addr).DirectoryState;
206 }
207 }
208
// Write the new state into TBE (if any), the probe filter entry (if any),
// and the directory entry. Invariants enforced: stable pf-tracked states
// (NX/NO/S/O) require a pf entry, E requires no pf entry, and no stable
// state may coexist with a live TBE.
209 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
210 if (is_valid(tbe)) {
211 tbe.TBEState := state;
212 }
213 if (probe_filter_enabled || full_bit_dir_enabled) {
214 if (is_valid(pf_entry)) {
215 pf_entry.PfState := state;
216 }
217 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
218 assert(is_valid(pf_entry));
219 }
220 if (state == State:E) {
221 assert(is_valid(pf_entry) == false);
222 }
223 }
224 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
225 state == State:O) {
226 assert(is_valid(tbe) == false);
227 }
228 getDirectoryEntry(addr).DirectoryState := state;
229 }
230
// Map an incoming cache request type to the corresponding directory event;
// aborts on anything other than GETS/GETX.
231 Event cache_request_to_event(CoherenceRequestType type) {
232 if (type == CoherenceRequestType:GETS) {
233 return Event:GETS;
234 } else if (type == CoherenceRequestType:GETX) {
235 return Event:GETX;
236 } else {
237 error("Invalid CoherenceRequestType");
238 }
239 }
240
// Internal trigger queue: lets completion events be self-posted and
// consumed in order.
241 MessageBuffer triggerQueue, ordered="true";
242
243 // ** OUT_PORTS **
// NOTE(review): requestQueue_out is declared with ResponseMsg although the
// in_port side of requestToDir uses RequestMsg; it is only used for
// recycling requests — confirm against the SLICC out_port semantics.
244 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
245 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
246 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
247 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
248 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
249
250 //
251 // Memory buffer for memory controller to DIMM communication
252 //
253 out_port(memQueue_out, MemoryMsg, memBuffer);
254
255 // ** IN_PORTS **
256
257 // Trigger Queue
// Trigger queue (highest rank = highest priority): converts self-posted
// TriggerMsg types into completion events for the blocked transaction.
258 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
259 if (triggerQueue_in.isReady()) {
260 peek(triggerQueue_in, TriggerMsg) {
261 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
262 TBE tbe := TBEs[in_msg.Address];
263 if (in_msg.Type == TriggerType:ALL_ACKS) {
264 trigger(Event:All_acks_and_owner_data, in_msg.Address,
265 pf_entry, tbe);
266 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
267 trigger(Event:All_acks_and_shared_data, in_msg.Address,
268 pf_entry, tbe);
269 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
270 trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
271 pf_entry, tbe);
272 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
273 trigger(Event:All_Unblocks, in_msg.Address,
274 pf_entry, tbe);
275 } else {
276 error("Unexpected message");
277 }
278 }
279 }
280 }
281
// Unblock network: carries both unblock notifications and the data/control
// halves of PutX writebacks from the caches.
282 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
283 if (unblockNetwork_in.isReady()) {
284 peek(unblockNetwork_in, ResponseMsg) {
285 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
286 TBE tbe := TBEs[in_msg.Address];
287 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
288 trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
289 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
290 trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
291 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
292 trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
293 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
294 trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
295 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
296 trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
297 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
298 trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
299 pf_entry, tbe);
300 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
301 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
302 pf_entry, tbe);
303 } else {
304 error("Invalid message");
305 }
306 }
307 }
308 }
309
310 // Response Network
// Response Network: cache acks/data needed to complete DMA transactions.
311 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
312 if (responseToDir_in.isReady()) {
313 peek(responseToDir_in, ResponseMsg) {
314 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
315 TBE tbe := TBEs[in_msg.Address];
316 if (in_msg.Type == CoherenceResponseType:ACK) {
317 trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
318 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
319 trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
320 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
321 trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
322 } else if (in_msg.Type == CoherenceResponseType:DATA) {
323 trigger(Event:Data, in_msg.Address, pf_entry, tbe);
324 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
325 trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
326 } else {
327 error("Unexpected message");
328 }
329 }
330 }
331 }
332
333 // off-chip memory request/response is done
// Memory controller completions: read data or writeback ack from DRAM.
334 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
335 if (memQueue_in.isReady()) {
336 peek(memQueue_in, MemoryMsg) {
337 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
338 TBE tbe := TBEs[in_msg.Address];
339 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
340 trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
341 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
342 trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
343 } else {
344 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
345 error("Invalid message");
346 }
347 }
348 }
349 }
350
// Cache request network. With a probe filter, a GETS/GETX that misses the
// filter and finds no free way triggers Pf_Replacement at the VICTIM
// address (cacheProbe) rather than the request's own address.
351 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
352 if (requestQueue_in.isReady()) {
353 peek(requestQueue_in, RequestMsg) {
354 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
355 TBE tbe := TBEs[in_msg.Address];
356 if (in_msg.Type == CoherenceRequestType:PUT) {
357 trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
358 } else {
359 if (probe_filter_enabled || full_bit_dir_enabled) {
360 if (is_valid(pf_entry)) {
361 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
362 pf_entry, tbe);
363 } else {
364 if (probeFilter.cacheAvail(in_msg.Address)) {
365 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
366 pf_entry, tbe);
367 } else {
368 trigger(Event:Pf_Replacement,
369 probeFilter.cacheProbe(in_msg.Address),
370 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
371 TBEs[probeFilter.cacheProbe(in_msg.Address)]);
372 }
373 }
374 } else {
375 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
376 pf_entry, tbe);
377 }
378 }
379 }
380 }
381 }
382
// DMA request network (lowest rank): note events key off LineAddress, the
// block-aligned address, not the byte-level PhysicalAddress.
383 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
384 if (dmaRequestQueue_in.isReady()) {
385 peek(dmaRequestQueue_in, DMARequestMsg) {
386 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
387 TBE tbe := TBEs[in_msg.LineAddress];
388 if (in_msg.Type == DMARequestType:READ) {
389 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
390 } else if (in_msg.Type == DMARequestType:WRITE) {
391 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
392 } else {
393 error("Invalid message");
394 }
395 }
396 }
397 }
398
399 // Actions
400
// --- Probe filter bookkeeping actions (all no-ops unless a probe filter
// or full-bit directory is configured) ---
401 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
402 if (probe_filter_enabled || full_bit_dir_enabled) {
403 assert(is_valid(cache_entry));
404 probeFilter.setMRU(address);
405 }
406 }
407
// Sanity check: the node sending the unblock must not already be recorded
// as owner (or, in full-bit mode, as a sharer).
408 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
409 if (probe_filter_enabled || full_bit_dir_enabled) {
410 assert(is_valid(cache_entry));
411 peek(unblockNetwork_in, ResponseMsg) {
412 assert(cache_entry.Owner != in_msg.Sender);
413 if (full_bit_dir_enabled) {
414 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
415 }
416 }
417 }
418 }
419
// Record the unblocking node as the new owner; in full-bit mode it becomes
// the sole sharer.
420 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
421 if (probe_filter_enabled || full_bit_dir_enabled) {
422 assert(is_valid(cache_entry));
423 peek(unblockNetwork_in, ResponseMsg) {
424 cache_entry.Owner := in_msg.Sender;
425 if (full_bit_dir_enabled) {
426 cache_entry.Sharers.clear();
427 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
428 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
429 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
430 }
431 }
432 }
433 }
434
435 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
436 if (full_bit_dir_enabled) {
437 assert(probeFilter.isTagPresent(address));
438 peek(unblockNetwork_in, ResponseMsg) {
439 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
440 }
441 }
442 }
443
// --- Writeback acknowledgement actions: respond to a PUT on the forward
// network with WB_ACK (proceed) or WB_NACK (retry) ---
444 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
445 peek(requestQueue_in, RequestMsg) {
446 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
447 out_msg.Address := address;
448 out_msg.Type := CoherenceRequestType:WB_ACK;
449 out_msg.Requestor := in_msg.Requestor;
450 out_msg.Destination.add(in_msg.Requestor);
451 out_msg.MessageSize := MessageSizeType:Writeback_Control;
452 }
453 }
454 }
455
456 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
457 peek(requestQueue_in, RequestMsg) {
458 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
459 out_msg.Address := address;
460 out_msg.Type := CoherenceRequestType:WB_NACK;
461 out_msg.Requestor := in_msg.Requestor;
462 out_msg.Destination.add(in_msg.Requestor);
463 out_msg.MessageSize := MessageSizeType:Writeback_Control;
464 }
465 }
466 }
467
// --- Probe filter entry allocation / deallocation ---
// Allocate an entry for the current request; requestor becomes Owner and
// the sharer set is sized to the number of L1 caches.
468 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
469 if (probe_filter_enabled || full_bit_dir_enabled) {
470 peek(requestQueue_in, RequestMsg) {
471 set_cache_entry(probeFilter.allocate(address, new PfEntry));
472 cache_entry.Owner := in_msg.Requestor;
473 cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
474 }
475 }
476 }
477
478 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
479 if (probe_filter_enabled || full_bit_dir_enabled) {
480 probeFilter.deallocate(address);
481 unset_cache_entry();
482 }
483 }
484
// Same as pfd, but tolerates the entry already being absent.
485 action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry") {
486 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
487 probeFilter.deallocate(address);
488 unset_cache_entry();
489 }
490 }
491
// --- TBE allocation / pending-message setup / deallocation ---
492 action(v_allocateTBE, "v", desc="Allocate TBE") {
493 peek(requestQueue_in, RequestMsg) {
494 TBEs.allocate(address);
495 set_tbe(TBEs[address]);
496 tbe.PhysicalAddress := address;
497 tbe.ResponseType := CoherenceResponseType:NULL;
498 }
499 }
500
// Allocate a TBE for a DMA request and snapshot the request's payload.
501 action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
502 peek(dmaRequestQueue_in, DMARequestMsg) {
503 TBEs.allocate(address);
504 set_tbe(TBEs[address]);
505 tbe.DmaDataBlk := in_msg.DataBlk;
506 tbe.PhysicalAddress := in_msg.PhysicalAddress;
507 tbe.Len := in_msg.Len;
508 tbe.DmaRequestor := in_msg.Requestor;
509 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
510 //
511 // One ack for each last-level cache
512 //
513 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
514 //
515 // Assume initially that the caches store a clean copy and that memory
516 // will provide the data
517 //
518 tbe.CacheDirty := false;
519 }
520 }
521
// Full-bit mode only waits for the actual sharers; otherwise all L1s.
522 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
523 assert(is_valid(tbe));
524 if (full_bit_dir_enabled) {
525 assert(is_valid(cache_entry));
526 tbe.NumPendingMsgs := cache_entry.Sharers.count();
527 } else {
528 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
529 }
530 }
531
532 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
533 assert(is_valid(tbe));
534 tbe.NumPendingMsgs := 1;
535 }
536
537 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
538 TBEs.deallocate(address);
539 unset_tbe();
540 }
541
// --- Ack-count setup: Acks/SilentAcks recorded here are later copied into
// the data response by d_sendData ---
542 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
543 assert(is_valid(tbe));
544 peek(requestQueue_in, RequestMsg) {
545 if (full_bit_dir_enabled) {
546 assert(is_valid(cache_entry));
547 //
548 // If we are using the full-bit directory and no sharers exists beyond
549 // the requestor, then we must set the ack number to all, not one
550 //
551 fwd_set := cache_entry.Sharers;
552 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
553 if (fwd_set.count() > 0) {
554 tbe.Acks := 1;
555 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
556 tbe.SilentAcks := tbe.SilentAcks - 1;
557 } else {
558 tbe.Acks := machineCount(MachineType:L1Cache);
559 tbe.SilentAcks := 0;
560 }
561 } else {
562 tbe.Acks := 1;
563 }
564 }
565 }
566
567 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
568 assert(is_valid(tbe));
569 if (probe_filter_enabled || full_bit_dir_enabled) {
570 tbe.Acks := machineCount(MachineType:L1Cache);
571 tbe.SilentAcks := 0;
572 } else {
573 tbe.Acks := 1;
574 }
575 }
576
// --- Pending-message accounting and queue pops ---
577 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
578 peek(responseToDir_in, ResponseMsg) {
579 assert(is_valid(tbe));
580 assert(in_msg.Acks > 0);
581 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
582 //
583 // Note that cache data responses will have an ack count of 2. However,
584 // directory DMA requests must wait for acks from all LLC caches, so
585 // only decrement by 1.
586 //
587 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
588 (in_msg.Type == CoherenceResponseType:DATA) ||
589 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
590 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
591 } else {
592 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
593 }
594 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
595 }
596 }
597
// Count down one outstanding UnblockS for a merged GETS transaction.
598 action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
599 peek(unblockNetwork_in, ResponseMsg) {
600 assert(is_valid(tbe));
601 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
602 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
603 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
604 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
605 }
606 }
607
608 action(n_popResponseQueue, "n", desc="Pop response queue") {
609 responseToDir_in.dequeue();
610 }
611
// --- Completion detection: when NumPendingMsgs hits zero, self-post a
// trigger whose type encodes what the responses revealed ---
612 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
613 assert(is_valid(tbe));
614 if (tbe.NumPendingMsgs == 0) {
615 enqueue(triggerQueue_out, TriggerMsg) {
616 out_msg.Address := address;
617 if (tbe.Sharers) {
618 if (tbe.Owned) {
619 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
620 } else {
621 out_msg.Type := TriggerType:ALL_ACKS;
622 }
623 } else {
624 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
625 }
626 }
627 }
628 }
629
630 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
631 assert(is_valid(tbe));
632 if (tbe.NumPendingMsgs == 0) {
633 enqueue(triggerQueue_out, TriggerMsg) {
634 out_msg.Address := address;
635 out_msg.Type := TriggerType:ALL_UNBLOCKS;
636 }
637 }
638 }
639
640 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
641 assert(is_valid(tbe));
642 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
643 }
644
645 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
646 if (probe_filter_enabled || full_bit_dir_enabled) {
647 assert(is_valid(tbe));
648 tbe.NumPendingMsgs := 0;
649 }
650 }
651
652 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
653 assert(is_valid(tbe));
654 if (tbe.NumPendingMsgs == 0) {
655 assert(probe_filter_enabled || full_bit_dir_enabled);
656 enqueue(triggerQueue_out, TriggerMsg) {
657 out_msg.Address := address;
658 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
659 }
660 }
661 }
662
// --- Data / ack response actions ---
// Forward memory data to the original requestor, tagged with the ack count
// set up earlier (sa/saa) so the requestor knows how many acks to expect.
663 action(d_sendData, "d", desc="Send data to requestor") {
664 peek(memQueue_in, MemoryMsg) {
665 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
666 assert(is_valid(tbe));
667 out_msg.Address := address;
668 out_msg.Type := tbe.ResponseType;
669 out_msg.Sender := machineID;
670 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
671 out_msg.DataBlk := in_msg.DataBlk;
672 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
673 out_msg.Dirty := false; // By definition, the block is now clean
674 out_msg.Acks := tbe.Acks;
675 out_msg.SilentAcks := tbe.SilentAcks;
676 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
677 assert(out_msg.Acks > 0);
678 out_msg.MessageSize := MessageSizeType:Response_Data;
679 }
680 }
681 }
682
683 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
684 peek(memQueue_in, MemoryMsg) {
685 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
686 assert(is_valid(tbe));
687 out_msg.PhysicalAddress := address;
688 out_msg.LineAddress := address;
689 out_msg.Type := DMAResponseType:DATA;
690 //
691 // we send the entire data block and rely on the dma controller to
692 // split it up if need be
693 //
694 out_msg.DataBlk := in_msg.DataBlk;
695 out_msg.Destination.add(tbe.DmaRequestor);
696 out_msg.MessageSize := MessageSizeType:Response_Data;
697 }
698 }
699 }
700
// Same as dr, but the data comes from the TBE (cache-supplied dirty data)
// rather than from memory.
701 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
702 peek(triggerQueue_in, TriggerMsg) {
703 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
704 assert(is_valid(tbe));
705 out_msg.PhysicalAddress := address;
706 out_msg.LineAddress := address;
707 out_msg.Type := DMAResponseType:DATA;
708 //
709 // we send the entire data block and rely on the dma controller to
710 // split it up if need be
711 //
712 out_msg.DataBlk := tbe.DataBlk;
713 out_msg.Destination.add(tbe.DmaRequestor);
714 out_msg.MessageSize := MessageSizeType:Response_Data;
715 }
716 }
717 }
718
719 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
720 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
721 assert(is_valid(tbe));
722 out_msg.PhysicalAddress := address;
723 out_msg.LineAddress := address;
724 out_msg.Type := DMAResponseType:ACK;
725 out_msg.Destination.add(tbe.DmaRequestor);
726 out_msg.MessageSize := MessageSizeType:Writeback_Control;
727 }
728 }
729
// --- Record transaction metadata in the TBE ---
730 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
731 peek(requestQueue_in, RequestMsg) {
732 assert(is_valid(tbe));
733 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
734 }
735 }
736
// Choose DATA vs DATA_EXCLUSIVE: in full-bit mode, if the requestor is the
// only sharer, the response can be exclusive.
737 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
738 peek(requestQueue_in, RequestMsg) {
739 assert(is_valid(tbe));
740 if (full_bit_dir_enabled) {
741 fwd_set := cache_entry.Sharers;
742 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
743 if (fwd_set.count() > 0) {
744 tbe.ResponseType := CoherenceResponseType:DATA;
745 } else {
746 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
747 }
748 } else {
749 tbe.ResponseType := CoherenceResponseType:DATA;
750 }
751 }
752 }
753
754 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
755 peek(requestQueue_in, RequestMsg) {
756 assert(is_valid(tbe));
757 tbe.GetSRequestors.add(in_msg.Requestor);
758 }
759 }
760
761 action(r_setSharerBit, "r", desc="We saw other sharers") {
762 assert(is_valid(tbe));
763 tbe.Sharers := true;
764 }
765
// Record that an owner responded (an owner is also a sharer, so both bits
// are set). Fix: desc was copy-pasted from r_setSharerBit ("We saw other
// sharers") although this action sets Owned.
766 action(so_setOwnerBit, "so", desc="We saw the owner") {
767 assert(is_valid(tbe));
768 tbe.Sharers := true;
769 tbe.Owned := true;
770 }
771
// --- Off-chip memory fetch requests (cache-initiated and DMA-initiated) ---
772 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
773 peek(requestQueue_in, RequestMsg) {
774 enqueue(memQueue_out, MemoryMsg, latency="1") {
775 out_msg.Address := address;
776 out_msg.Type := MemoryRequestType:MEMORY_READ;
777 out_msg.Sender := machineID;
778 out_msg.OriginalRequestorMachId := in_msg.Requestor;
779 out_msg.MessageSize := in_msg.MessageSize;
780 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
781 DPRINTF(RubySlicc, "%s\n", out_msg);
782 }
783 }
784 }
785
786 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
787 peek(dmaRequestQueue_in, DMARequestMsg) {
788 enqueue(memQueue_out, MemoryMsg, latency="1") {
789 out_msg.Address := address;
790 out_msg.Type := MemoryRequestType:MEMORY_READ;
791 out_msg.Sender := machineID;
792 out_msg.OriginalRequestorMachId := in_msg.Requestor;
793 out_msg.MessageSize := in_msg.MessageSize;
794 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
795 DPRINTF(RubySlicc, "%s\n", out_msg);
796 }
797 }
798 }
799
  // Forward the cache request to the other caches, but only when their
  // responses may actually be needed: skipped entirely when this is the
  // only L1 cache, or when tbe.Acks > 1 (set by saa_setAcksToAllIfPF;
  // presumably the probe filter has established no other cache holds
  // the block -- NOTE(review): confirm this reading of the Acks field).
  action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
    assert(is_valid(tbe));
    if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
      if (full_bit_dir_enabled) {
        // Full-bit directory: multicast only to the recorded sharers
        // (minus the requestor), and tell the requestor how many acks
        // will never be sent (SilentAcks).
        assert(is_valid(cache_entry));
        peek(requestQueue_in, RequestMsg) {
          fwd_set := cache_entry.Sharers;
          fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
          if (fwd_set.count() > 0) {
            enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
              out_msg.Address := address;
              out_msg.Type := in_msg.Type;
              out_msg.Requestor := in_msg.Requestor;
              out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
              out_msg.MessageSize := MessageSizeType:Multicast_Control;
              out_msg.InitialRequestTime := in_msg.InitialRequestTime;
              out_msg.ForwardRequestTime := get_time();
              assert(tbe.SilentAcks > 0);
              out_msg.SilentAcks := tbe.SilentAcks;
            }
          }
        }
      } else {
        // No sharer information available: broadcast to every L1 cache
        // except the original requestor.
        peek(requestQueue_in, RequestMsg) {
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            out_msg.Address := address;
            out_msg.Type := in_msg.Type;
            out_msg.Requestor := in_msg.Requestor;
            out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
            out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
            out_msg.MessageSize := MessageSizeType:Broadcast_Control;
            out_msg.InitialRequestTime := in_msg.InitialRequestTime;
            out_msg.ForwardRequestTime := get_time();
          }
        }
      }
    }
  }
838
  // Invalidate every cached copy (used on probe-filter replacement).
  // With the full-bit directory only the recorded sharers are probed;
  // otherwise an INV is broadcast to all L1 caches.  A no-op on a
  // single-cache system.
  action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
    if (machineCount(MachineType:L1Cache) > 1) {
      if (full_bit_dir_enabled) {
        assert(cache_entry.Sharers.count() > 0);
        peek(requestQueue_in, RequestMsg) {
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            out_msg.Address := address;
            out_msg.Type := CoherenceRequestType:INV;
            out_msg.Requestor := machineID;
            out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
            out_msg.MessageSize := MessageSizeType:Multicast_Control;
          }
        }
      } else {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        }
      }
    }
  }
863
864 action(io_invalidateOwnerRequest, "io", desc="invalidate all copies") {
865 if (machineCount(MachineType:L1Cache) > 1) {
866 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
867 assert(is_valid(cache_entry));
868 out_msg.Address := address;
869 out_msg.Type := CoherenceRequestType:INV;
870 out_msg.Requestor := machineID;
871 out_msg.Destination.add(cache_entry.Owner);
872 out_msg.MessageSize := MessageSizeType:Request_Control;
873 out_msg.DirectedProbe := true;
874 }
875 }
876 }
877
  // Forward the request to all possible holders.  With the full-bit
  // directory, multicast only to the recorded sharers and tell the
  // requestor how many acks will be "silent" (never sent) so it can
  // complete without hearing from non-sharers; the extra -1 accounts
  // for the requestor itself -- NOTE(review): confirm.
  action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
    if (machineCount(MachineType:L1Cache) > 1) {
      peek(requestQueue_in, RequestMsg) {
        if (full_bit_dir_enabled) {
          fwd_set := cache_entry.Sharers;
          fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
          if (fwd_set.count() > 0) {
            enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
              out_msg.Address := address;
              out_msg.Type := in_msg.Type;
              out_msg.Requestor := in_msg.Requestor;
              out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
              out_msg.MessageSize := MessageSizeType:Multicast_Control;
              out_msg.InitialRequestTime := in_msg.InitialRequestTime;
              out_msg.ForwardRequestTime := get_time();
              out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
              out_msg.SilentAcks := out_msg.SilentAcks - 1;
            }
          }
        } else {
          // No sharer info: broadcast to every L1 except the requestor.
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            out_msg.Address := address;
            out_msg.Type := in_msg.Type;
            out_msg.Requestor := in_msg.Requestor;
            out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
            out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
            out_msg.MessageSize := MessageSizeType:Broadcast_Control;
            out_msg.InitialRequestTime := in_msg.InitialRequestTime;
            out_msg.ForwardRequestTime := get_time();
          }
        }
      }
    }
  }
912
  // Forward the coalesced read request (all requestors recorded in
  // tbe.GetSRequestors) as a single MERGED_GETS to the current owner.
  // The owner is taken from the unblock message: the CurOwner field of
  // an UNBLOCKS message, otherwise the unblock's sender.
  action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
    assert(machineCount(MachineType:L1Cache) > 1);
    //
    // Fixme! The unblock network should not stall on the forward network.  Add a trigger queue to
    // decouple the two.
    //
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:MERGED_GETS;
        out_msg.MergedRequestors := tbe.GetSRequestors;
        if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
          out_msg.Destination.add(in_msg.CurOwner);
        } else {
          out_msg.Destination.add(in_msg.Sender);
        }
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := zero_time();
        out_msg.ForwardRequestTime := get_time();
      }
    }
  }
936
  // Forward the request to the owner recorded by the probe filter or
  // full-bit directory (a directed probe); with neither enabled, fall
  // back to broadcasting to every L1 except the requestor.
  action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
    assert(machineCount(MachineType:L1Cache) > 1);
    if (probe_filter_enabled || full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          assert(is_valid(cache_entry));
          out_msg.Address := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.Destination.add(cache_entry.Owner);
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.DirectedProbe := true;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := get_time();
        }
      }
    } else {
      peek(requestQueue_in, RequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
          out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := get_time();
        }
      }
    }
  }
968
  // On a DMA write, issue a GETX on behalf of the memory controller to
  // invalidate (and collect data from) all cached copies.  Skipped when
  // tbe.NumPendingMsgs == 0, i.e. no cache responses are expected
  // (probe filter already established no copies exist).
  action(f_forwardWriteFromDma, "fw", desc="Forward requests") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs > 0) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:GETX;
          //
          // Send to all L1 caches, since the requestor is the memory controller
          // itself
          //
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        }
      }
    }
  }
987
  // On a DMA read, issue a GETS on behalf of the memory controller so
  // any cache holding a newer copy supplies the data.  Skipped when
  // tbe.NumPendingMsgs == 0 (no cache responses expected).
  action(f_forwardReadFromDma, "fr", desc="Forward requests") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs > 0) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:GETS;
          //
          // Send to all L1 caches, since the requestor is the memory controller
          // itself
          //
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        }
      }
    }
  }
1006
  // --- Queue management actions ---

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    // Record the unblocker in the transition trace before popping.
    peek(unblockNetwork_in, ResponseMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
    }
    unblockNetwork_in.dequeue();
  }

  action(k_wakeUpDependents, "k", desc="wake-up dependents") {
    // Re-schedule requests previously parked via stall_and_wait on this address.
    wakeUpBuffers(address);
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
    triggerQueue_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
    // Note the stalled requestor in the trace, then park the DMA request
    // until k_wakeUpDependents fires for this address.
    peek(dmaRequestQueue_in, DMARequestMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
    stall_and_wait(dmaRequestQueue_in, address);
  }
1040
  // Record data arriving from off-chip memory into the TBE, but only if
  // no cache has already supplied a dirty (fresher) copy.
  action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
    peek(memQueue_in, MemoryMsg) {
      assert(is_valid(tbe));
      if (tbe.CacheDirty == false) {
        tbe.DataBlk := in_msg.DataBlk;
      }
    }
  }

  // Record data supplied by a cache; mark it dirty so a later memory
  // response does not overwrite it (see r_recordMemoryData).
  action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
    peek(responseToDir_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CacheDirty := true;
      tbe.DataBlk := in_msg.DataBlk;
    }
  }
1057
  // Write data from a cache response directly into the directory entry.
  action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
    peek(responseToDir_in, ResponseMsg) {
      getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  // Write dirty writeback (PUTX/PUTO) data into the directory entry.
  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  // Merge a (possibly partial) DMA write into the directory entry:
  // first install the latest full block from the TBE, then overlay the
  // DMA bytes at the proper offset.
  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
  }

  // Write the full TBE data block back into the directory entry.
  action(wdt_writeDataFromTBE, "wdt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
  }
1091
  // Sanity check: a cache (not memory) must have supplied the data.
  action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
    assert(is_valid(tbe));
    assert(tbe.CacheDirty);
  }

  // Sanity check (probe filter / full-bit dir only): the requestor must
  // not already be the recorded owner.
  action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        assert(is_valid(cache_entry));
        assert(cache_entry.Owner != in_msg.Requestor);
      }
    }
  }

  // Sanity check (full-bit dir only): the requestor must not already be
  // a recorded sharer.
  action(ans_assertNotSharer, "ans", desc="Assert that request is not a current sharer") {
    if (full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
      }
    }
  }
1113
  // Full-bit directory bookkeeping: drop the unblocking cache from the
  // sharer set (e.g. after it wrote the block back).
  action(rs_removeSharer, "s", desc="remove current sharer") {
    if (full_bit_dir_enabled) {
      peek(unblockNetwork_in, ResponseMsg) {
        assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
        cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
      }
    }
  }

  // Full-bit directory bookkeeping: reset the sharer set to contain
  // only the (new exclusive) requestor.
  action(cs_clearSharers, "cs", desc="clear current sharers") {
    if (full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        cache_entry.Sharers.clear();
        cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
      }
    }
  }
1131
  // Queue an off-chip writeback for PUTX data.  The data itself was
  // already installed in the directory entry (l_writeDataToMemory); the
  // memory controller models the timing of the write.
  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  // Queue an off-chip writeback carrying the merged DMA write data.
  action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
1154
  // Validate a clean-writeback control message: it must be marked clean,
  // and (simulator-only) its data must match what memory already holds.
  action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation.  We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
    }
  }

  // Park the request at the head of the request queue until the block's
  // current transaction completes (woken by k_wakeUpDependents).
  action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
    stall_and_wait(requestQueue_in, address);
  }
1174
1175 // TRANSITIONS
1176
  // Transitions out of E state
  // In E the directory/memory owns the block, so every request fetches
  // data from off-chip memory while probes go to any stale copies.
  transition(E, GETX, NO_B_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(E, GETS, NO_B_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(E, DMA_READ, NO_DR_B_W) {
    vd_allocateDmaRequestInTBE;
    qd_queueMemoryRequestFromDmaRead;
    spa_setPendingAcksToZeroIfPF;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
  }

  transition(E, DMA_WRITE, NO_DW_B_W) {
    vd_allocateDmaRequestInTBE;
    spa_setPendingAcksToZeroIfPF;
    sc_signalCompletionIfPF;
    f_forwardWriteFromDma;
    p_popDmaRequestQueue;
  }
1213
  // Transitions out of O state
  transition(O, GETX, NO_B_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    sa_setAcksToOne;
    qf_queueMemoryFetchRequest;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  // This transition is dumb, if a shared copy exists on-chip, then that should
  // provide data, not slow off-chip dram.  The problem is that the current
  // caches don't provide data in S state
  transition(O, GETS, O_B_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_B_W) {
    vd_allocateDmaRequestInTBE;
    spa_setPendingAcksToZeroIfPF;
    qd_queueMemoryRequestFromDmaRead;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
  }

  // Probe filter replacements: invalidate the cached copies so the
  // filter entry can be reclaimed.  NO uses a single directed probe to
  // the known owner; the other states must probe all possible holders.
  transition(O, Pf_Replacement, O_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
  }

  transition(S, Pf_Replacement, S_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
  }

  transition(NO, Pf_Replacement, NO_R) {
    v_allocateTBE;
    po_setPendingMsgsToOne;
    io_invalidateOwnerRequest;
    pfd_probeFilterDeallocate;
  }

  transition(NX, Pf_Replacement, NO_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
  }

  transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
    vd_allocateDmaRequestInTBE;
    f_forwardWriteFromDma;
    p_popDmaRequestQueue;
  }
1280
  // Transitions out of NX state
  transition(NX, GETX, NO_B) {
    r_setMRU;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  // Transitions out of NO state
  transition(NO, GETX, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    fc_forwardRequestConditionalOwner;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(S, GETX, NO_B) {
    r_setMRU;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(S, GETS, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    fb_forwardRequestBcast;
    i_popIncomingRequestQueue;
  }

  transition(NO, GETS, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    ans_assertNotSharer;
    fc_forwardRequestConditionalOwner;
    i_popIncomingRequestQueue;
  }

  transition(NX, GETS, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    fc_forwardRequestConditionalOwner;
    i_popIncomingRequestQueue;
  }

  transition({NO, NX, S}, PUT, WB) {
    //
    // note that the PUT requestor may not be the current owner if an invalidate
    // raced with PUT
    //
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
    vd_allocateDmaRequestInTBE;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
  }

  // Nack PUT requests when races cause us to believe we own the data
  transition({O, E}, PUT) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
1347
  // Blocked transient states: a transaction is already in flight for
  // this block, so park new requests until the completion/unblock wakes
  // them (k_wakeUpDependents).
  transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
              NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
              NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
             {GETS, GETX, PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition(NO_B, GETX, NO_B_X) {
    z_stallAndWaitRequest;
  }

  transition(NO_B, {PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition(NO_B_S, {GETX, PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
              NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
              NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
             {DMA_READ, DMA_WRITE}) {
    zd_stallAndWaitDMARequest;
  }

  // merge GETS into one response
  transition(NO_B, GETS, NO_B_S) {
    v_allocateTBE;
    rs_recordGetSRequestor;
    i_popIncomingRequestQueue;
  }

  transition(NO_B_S, GETS) {
    rs_recordGetSRequestor;
    i_popIncomingRequestQueue;
  }
1386
  // unblock responses
  transition({NO_B, NO_B_X}, UnblockS, NX) {
    us_updateSharerIfFBD;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition({NO_B, NO_B_X}, UnblockM, NO) {
    uo_updateOwnerIfPf;
    us_updateSharerIfFBD;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  // Merged-GETS completion: once the first requestor unblocks, forward
  // the coalesced read to the owner and wait for the remaining unblocks
  // (count set by sp_setPendingMsgsToMergedSharers).
  transition(NO_B_S, UnblockS, NO_B_S_W) {
    us_updateSharerIfFBD;
    fr_forwardMergeReadRequestsToOwner;
    sp_setPendingMsgsToMergedSharers;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_S, UnblockM, NO_B_S_W) {
    uo_updateOwnerIfPf;
    fr_forwardMergeReadRequestsToOwner;
    sp_setPendingMsgsToMergedSharers;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_S_W, UnblockS) {
    us_updateSharerIfFBD;
    mu_decrementNumberOfUnblocks;
    os_checkForMergedGetSCompletion;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_S_W, All_Unblocks, NX) {
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_B, UnblockS, O) {
    us_updateSharerIfFBD;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(O_B, UnblockM, NO) {
    us_updateSharerIfFBD;
    uo_updateOwnerIfPf;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }
1440
  // Memory data and cache ack/data responses for pending transactions.
  transition(NO_B_W, Memory_Data, NO_B) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
    r_recordMemoryData;
    o_checkForCompletion;
    l_popMemQueue;
  }

  transition(O_DR_B_W, Memory_Data, O_DR_B) {
    r_recordMemoryData;
    dr_sendDmaData;
    o_checkForCompletion;
    l_popMemQueue;
  }

  transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition({O_R, S_R, NO_R}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(S_R, Data) {
    wr_writeResponseDataToMemory;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_R, {Data, Exclusive_Data}) {
    wr_writeResponseDataToMemory;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // All invalidation acks (and any data) collected: the directory
  // regains exclusive ownership.
  transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition({NO_DR_B_W, O_DR_B_W}, Ack) {
    m_decrementNumberOfMessages;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
  }

  transition(O_DR_B, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(O_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }
1551
  // DMA transaction completions: all expected acks/data have arrived.
  transition(NO_DR_B, All_acks_and_owner_data, O) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks.  However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B, All_acks_and_shared_data, S) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks.  However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_owner_data, O) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks.  However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_shared_data, S) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks.  However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_DR_B, All_acks_and_owner_data, O) {
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_DR_B, All_acks_and_data_no_sharers, E) {
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks.  However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
    a_assertCacheData;
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks.  However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  // DMA write: merge the data and queue the off-chip writeback; the
  // DMA ack is sent once memory acknowledges (see NO_DW_W, Memory_Ack).
  transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWrite;
    g_popTriggerQueue;
  }
1653
  transition(NO_DW_W, Memory_Ack, E) {
    da_sendDmaAck;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(O_B_W, Memory_Data, O_B) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // Unblock arrived before the memory response: remember the outcome
  // and keep waiting for memory (NO_W / O_W).
  transition(NO_B_W, UnblockM, NO_W) {
    uo_updateOwnerIfPf;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_W, UnblockS, NO_W) {
    us_updateSharerIfFBD;
    j_popIncomingUnblockQueue;
  }

  transition(O_B_W, UnblockS, O_W) {
    us_updateSharerIfFBD;
    j_popIncomingUnblockQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    w_deallocateTBE;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(O_W, Memory_Data, O) {
    w_deallocateTBE;
    k_wakeUpDependents;
    l_popMemQueue;
  }
1694
  // WB State Transitions
  transition(WB, Writeback_Dirty, WB_O_W) {
    l_writeDataToMemory;
    rs_removeSharer;
    l_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
    l_writeDataToMemory;
    rs_removeSharer;
    l_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(WB_E_W, Memory_Ack, E) {
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(WB_O_W, Memory_Ack, O) {
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(WB, Writeback_Clean, O) {
    ll_checkIncomingWriteback;
    rs_removeSharer;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Writeback_Exclusive_Clean, E) {
    ll_checkIncomingWriteback;
    rs_removeSharer;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  // The PUT was nacked elsewhere / data kept: owner stays in a cache.
  transition(WB, Unblock, NX) {
    auno_assertUnblockerNotOwner;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }
1741 }