SLICC: Remove the keyword wake_up_dependents
[gem5.git] / src / mem / protocol / MOESI_hammer-dir.sm
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory,
38 CacheMemory * probeFilter,
39 MemoryControl * memBuffer,
40 int memory_controller_latency = 2,
41 bool probe_filter_enabled = false,
42 bool full_bit_dir_enabled = false
43 {
44
45 MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false";
46 MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
47 //
48 // For a finite buffered network, note that the DMA response network only
49 // works at this relatively lower numbered (lower priority) virtual network
50 // because the trigger queue decouples cache responses from DMA responses.
51 //
52 MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true";
53
54 MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
55 MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
56 MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
57 MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
58
59 // STATES
60 state_declaration(State, desc="Directory states", default="Directory_State_E") {
61 // Base states
62 NX, AccessPermission:Invalid, desc="Not Owner, probe filter entry exists, block in O at Owner";
63 NO, AccessPermission:Invalid, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
64 S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
65 O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
66 E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
67
68 O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
69 S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
70 NO_R, AccessPermission:Invalid, desc="Was Not Owner or Sharer, replacing probe filter entry";
71
72 NO_B, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked";
73 NO_B_X, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
74 NO_B_S, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
75 NO_B_S_W, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
76 O_B, AccessPermission:Invalid, "O^B", desc="Owner, Blocked";
77 NO_B_W, AccessPermission:Invalid, desc="Not Owner, Blocked, waiting for Dram";
78 O_B_W, AccessPermission:Invalid, desc="Owner, Blocked, waiting for Dram";
79 NO_W, AccessPermission:Invalid, desc="Not Owner, waiting for Dram";
80 O_W, AccessPermission:Invalid, desc="Owner, waiting for Dram";
81 NO_DW_B_W, AccessPermission:Invalid, desc="Not Owner, Dma Write waiting for Dram and cache responses";
82 NO_DR_B_W, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for Dram and cache responses";
83 NO_DR_B_D, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
84 NO_DR_B, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for cache responses";
85 NO_DW_W, AccessPermission:Invalid, desc="Not Owner, Dma Write waiting for Dram";
86 O_DR_B_W, AccessPermission:Invalid, desc="Owner, Dma Read waiting for Dram and cache responses";
87 O_DR_B, AccessPermission:Invalid, desc="Owner, Dma Read waiting for cache responses";
88 WB, AccessPermission:Invalid, desc="Blocked on a writeback";
89 WB_O_W, AccessPermission:Invalid, desc="Blocked on memory write, will go to O";
90 WB_E_W, AccessPermission:Invalid, desc="Blocked on memory write, will go to E";
91 }
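  //
  // Naming convention suggested by the state descriptions above: NO/NX = not
  // owner, a trailing _B = blocked, _W = waiting for DRAM, _R = replacing a
  // probe filter entry, and DR/DW = an outstanding DMA read/write.
  //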
92
93 // Events
94 enumeration(Event, desc="Directory events") {
95 GETX, desc="A GETX arrives";
96 GETS, desc="A GETS arrives";
97 PUT, desc="A PUT arrives";
98 Unblock, desc="An unblock message arrives";
99 UnblockS, desc="An unblock message arrives, block is now shared";
100 UnblockM, desc="An unblock message arrives, block is now exclusive/modified";
101 Writeback_Clean, desc="The final part of a PutX (no data)";
102 Writeback_Dirty, desc="The final part of a PutX (data)";
103 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
104 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
105
106 // Probe filter
107 Pf_Replacement, desc="probe filter replacement";
108
109 // DMA requests
110 DMA_READ, desc="A DMA Read memory request";
111 DMA_WRITE, desc="A DMA Write memory request";
112
113 // Memory Controller
114 Memory_Data, desc="Fetched data from memory arrives";
115 Memory_Ack, desc="Writeback Ack from memory arrives";
116
117 // Cache responses required to handle DMA
118 Ack, desc="Received an ack message";
119 Shared_Ack, desc="Received an ack message, responder has a shared copy";
120 Shared_Data, desc="Received a data message, responder has a shared copy";
121 Data, desc="Received a data message, responder had an owned or exclusive copy and gave it to us";
122 Exclusive_Data, desc="Received a data message, responder had an exclusive copy and gave it to us";
123
124 // Triggers
125 All_acks_and_shared_data, desc="Received shared data and message acks";
126 All_acks_and_owner_data, desc="Received owner data and message acks";
127 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
128 All_Unblocks, desc="Received all unblocks for a merged gets request";
129 }
130
131 // TYPES
132
133 // DirectoryEntry
134 structure(Entry, desc="...", interface="AbstractEntry") {
135 State DirectoryState, desc="Directory state";
136 DataBlock DataBlk, desc="data for the block";
137 }
138
139 // ProbeFilterEntry
140 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
141 State PfState, desc="Probe filter state, mirrors the directory state";
142 MachineID Owner, desc="Owner node";
143 DataBlock DataBlk, desc="data for the block";
144 Set Sharers, desc="sharing vector for full bit directory";
145 }
146
147 // TBE entries for DMA requests
148 structure(TBE, desc="TBE entries for outstanding DMA requests") {
149 Address PhysicalAddress, desc="physical address";
150 State TBEState, desc="Transient State";
151 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
152 int Acks, default="0", desc="The number of acks that the waiting response represents";
153 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
154 DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
155 DataBlock DataBlk, desc="The current view of system memory";
156 int Len, desc="...";
157 MachineID DmaRequestor, desc="DMA requestor";
158 NetDest GetSRequestors, desc="GETS merged requestors";
159 int NumPendingMsgs, desc="Number of pending acks/messages";
160 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
161 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
162 bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
163 }
164
165 external_type(TBETable) {
166 TBE lookup(Address);
167 void allocate(Address);
168 void deallocate(Address);
169 bool isPresent(Address);
170 }
171
172 void set_cache_entry(AbstractCacheEntry b);
173 void unset_cache_entry();
174 void set_tbe(TBE a);
175 void unset_tbe();
176 void wakeUpBuffers(Address a);
177
178 // ** OBJECTS **
179
180 Set fwd_set;
181
182 TBETable TBEs, template_hack="<Directory_TBE>";
183
184 Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
185 return static_cast(Entry, directory[addr]);
186 }
187
188 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
189 if(probe_filter_enabled) {
190 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
191 return pfEntry;
192 }
193 return OOD;
194 }
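  //
  // When the probe filter is disabled this returns OOD, which is_valid()
  // treats as an invalid pointer, so callers must handle a missing entry.
  //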
195
196 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
197 if (is_valid(tbe)) {
198 return tbe.TBEState;
199 } else {
200 if (probe_filter_enabled || full_bit_dir_enabled) {
201 if (is_valid(pf_entry)) {
202 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
203 } else {
204 assert(getDirectoryEntry(addr).DirectoryState == State:E);
205 }
206 }
207 return getDirectoryEntry(addr).DirectoryState;
208 }
209 }
210
211 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
212 if (is_valid(tbe)) {
213 tbe.TBEState := state;
214 }
215 if (probe_filter_enabled || full_bit_dir_enabled) {
216 if (is_valid(pf_entry)) {
217 pf_entry.PfState := state;
218 }
219 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
220 assert(is_valid(pf_entry));
221 }
222 }
223 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
224 state == State:O) {
225 assert(is_valid(tbe) == false);
226 }
227 getDirectoryEntry(addr).DirectoryState := state;
228 }
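  //
  // The assertions above capture two invariants: the stable pointer states
  // (NX/NO/S/O) require a probe filter entry, and stable states in general
  // (E/NX/NO/S/O) must not have an allocated TBE.
  //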
229
230 Event cache_request_to_event(CoherenceRequestType type) {
231 if (type == CoherenceRequestType:GETS) {
232 return Event:GETS;
233 } else if (type == CoherenceRequestType:GETX) {
234 return Event:GETX;
235 } else {
236 error("Invalid CoherenceRequestType");
237 }
238 }
239
240 MessageBuffer triggerQueue, ordered="true";
241
242 // ** OUT_PORTS **
243 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
244 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
245 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
246 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
247 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
248
249 //
250 // Memory buffer for memory controller to DIMM communication
251 //
252 out_port(memQueue_out, MemoryMsg, memBuffer);
253
254 // ** IN_PORTS **
255
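  //
  // The rank values below order the in_ports: the trigger queue (rank=5) is
  // serviced ahead of unblocks, responses, memory, cache requests, and
  // finally DMA requests (rank=0), which presumably gives internally
  // generated completion events priority over new external traffic.
  //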
256 // Trigger Queue
257 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
258 if (triggerQueue_in.isReady()) {
259 peek(triggerQueue_in, TriggerMsg) {
260 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
261 TBE tbe := TBEs[in_msg.Address];
262 if (in_msg.Type == TriggerType:ALL_ACKS) {
263 trigger(Event:All_acks_and_owner_data, in_msg.Address,
264 pf_entry, tbe);
265 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
266 trigger(Event:All_acks_and_shared_data, in_msg.Address,
267 pf_entry, tbe);
268 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
269 trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
270 pf_entry, tbe);
271 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
272 trigger(Event:All_Unblocks, in_msg.Address,
273 pf_entry, tbe);
274 } else {
275 error("Unexpected message");
276 }
277 }
278 }
279 }
280
281 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
282 if (unblockNetwork_in.isReady()) {
283 peek(unblockNetwork_in, ResponseMsg) {
284 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
285 TBE tbe := TBEs[in_msg.Address];
286 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
287 trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
288 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
289 trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
290 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
291 trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
292 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
293 trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
294 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
295 trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
296 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
297 trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
298 pf_entry, tbe);
299 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
300 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
301 pf_entry, tbe);
302 } else {
303 error("Invalid message");
304 }
305 }
306 }
307 }
308
309 // Response Network
310 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
311 if (responseToDir_in.isReady()) {
312 peek(responseToDir_in, ResponseMsg) {
313 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
314 TBE tbe := TBEs[in_msg.Address];
315 if (in_msg.Type == CoherenceResponseType:ACK) {
316 trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
317 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
318 trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
319 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
320 trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
321 } else if (in_msg.Type == CoherenceResponseType:DATA) {
322 trigger(Event:Data, in_msg.Address, pf_entry, tbe);
323 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
324 trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
325 } else {
326 error("Unexpected message");
327 }
328 }
329 }
330 }
331
332 // off-chip memory request/response is done
333 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
334 if (memQueue_in.isReady()) {
335 peek(memQueue_in, MemoryMsg) {
336 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
337 TBE tbe := TBEs[in_msg.Address];
338 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
339 trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
340 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
341 trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
342 } else {
343 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
344 error("Invalid message");
345 }
346 }
347 }
348 }
349
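  //
  // Cache requests: PUTs are handled directly; GETS/GETX either hit an
  // existing probe filter entry, allocate into a free way, or, if the set is
  // full, first trigger Pf_Replacement on the victim address chosen by
  // probeFilter.cacheProbe().
  //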
350 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
351 if (requestQueue_in.isReady()) {
352 peek(requestQueue_in, RequestMsg) {
353 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
354 TBE tbe := TBEs[in_msg.Address];
355 if (in_msg.Type == CoherenceRequestType:PUT) {
356 trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
357 } else {
358 if (probe_filter_enabled || full_bit_dir_enabled) {
359 if (is_valid(pf_entry)) {
360 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
361 pf_entry, tbe);
362 } else {
363 if (probeFilter.cacheAvail(in_msg.Address)) {
364 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
365 pf_entry, tbe);
366 } else {
367 trigger(Event:Pf_Replacement,
368 probeFilter.cacheProbe(in_msg.Address),
369 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
370 TBEs[probeFilter.cacheProbe(in_msg.Address)]);
371 }
372 }
373 } else {
374 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
375 pf_entry, tbe);
376 }
377 }
378 }
379 }
380 }
381
382 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
383 if (dmaRequestQueue_in.isReady()) {
384 peek(dmaRequestQueue_in, DMARequestMsg) {
385 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
386 TBE tbe := TBEs[in_msg.LineAddress];
387 if (in_msg.Type == DMARequestType:READ) {
388 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
389 } else if (in_msg.Type == DMARequestType:WRITE) {
390 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
391 } else {
392 error("Invalid message");
393 }
394 }
395 }
396 }
397
398 // Actions
399
400 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
401 if (probe_filter_enabled || full_bit_dir_enabled) {
402 assert(is_valid(cache_entry));
403 probeFilter.setMRU(address);
404 }
405 }
406
407 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
408 if (probe_filter_enabled || full_bit_dir_enabled) {
409 assert(is_valid(cache_entry));
410 peek(unblockNetwork_in, ResponseMsg) {
411 assert(cache_entry.Owner != in_msg.Sender);
412 if (full_bit_dir_enabled) {
413 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
414 }
415 }
416 }
417 }
418
419 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
420 if (probe_filter_enabled || full_bit_dir_enabled) {
421 assert(is_valid(cache_entry));
422 peek(unblockNetwork_in, ResponseMsg) {
423 cache_entry.Owner := in_msg.Sender;
424 if (full_bit_dir_enabled) {
425 cache_entry.Sharers.clear();
426 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
427 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
428 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
429 }
430 }
431 }
432 }
433
434 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
435 if (full_bit_dir_enabled) {
436 assert(probeFilter.isTagPresent(address));
437 peek(unblockNetwork_in, ResponseMsg) {
438 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
439 }
440 }
441 }
442
443 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
444 peek(requestQueue_in, RequestMsg) {
445 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
446 out_msg.Address := address;
447 out_msg.Type := CoherenceRequestType:WB_ACK;
448 out_msg.Requestor := in_msg.Requestor;
449 out_msg.Destination.add(in_msg.Requestor);
450 out_msg.MessageSize := MessageSizeType:Writeback_Control;
451 }
452 }
453 }
454
455 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
456 peek(requestQueue_in, RequestMsg) {
457 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
458 out_msg.Address := address;
459 out_msg.Type := CoherenceRequestType:WB_NACK;
460 out_msg.Requestor := in_msg.Requestor;
461 out_msg.Destination.add(in_msg.Requestor);
462 out_msg.MessageSize := MessageSizeType:Writeback_Control;
463 }
464 }
465 }
466
467 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
468 if (probe_filter_enabled || full_bit_dir_enabled) {
469 peek(requestQueue_in, RequestMsg) {
470 set_cache_entry(probeFilter.allocate(address, new PfEntry));
471 cache_entry.Owner := in_msg.Requestor;
472 }
473 }
474 }
475
476 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
477 if (probe_filter_enabled || full_bit_dir_enabled) {
478 probeFilter.deallocate(address);
479 unset_cache_entry();
480 }
481 }
482
483 action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if present") {
484 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
485 probeFilter.deallocate(address);
486 unset_cache_entry();
487 }
488 }
489
490 action(v_allocateTBE, "v", desc="Allocate TBE") {
491 peek(requestQueue_in, RequestMsg) {
492 TBEs.allocate(address);
493 set_tbe(TBEs[address]);
494 tbe.PhysicalAddress := address;
495 tbe.ResponseType := CoherenceResponseType:NULL;
496 }
497 }
498
499 action(vd_allocateDmaRequestInTBE, "vd", desc="Allocate TBE and record the DMA request") {
500 peek(dmaRequestQueue_in, DMARequestMsg) {
501 TBEs.allocate(address);
502 set_tbe(TBEs[address]);
503 tbe.DmaDataBlk := in_msg.DataBlk;
504 tbe.PhysicalAddress := in_msg.PhysicalAddress;
505 tbe.Len := in_msg.Len;
506 tbe.DmaRequestor := in_msg.Requestor;
507 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
508 //
509 // One ack for each last-level cache
510 //
511 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
512 //
513 // Assume initially that the caches store a clean copy and that memory
514 // will provide the data
515 //
516 tbe.CacheDirty := false;
517 }
518 }
519
520 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
521 assert(is_valid(tbe));
522 if (full_bit_dir_enabled) {
523 assert(is_valid(cache_entry));
524 tbe.NumPendingMsgs := cache_entry.Sharers.count();
525 } else {
526 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
527 }
528 }
529
530 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
531 assert(is_valid(tbe));
532 tbe.NumPendingMsgs := 1;
533 }
534
535 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
536 TBEs.deallocate(address);
537 unset_tbe();
538 }
539
540 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
541 assert(is_valid(tbe));
542 peek(requestQueue_in, RequestMsg) {
543 if (full_bit_dir_enabled) {
544 assert(is_valid(cache_entry));
545 //
546 // If we are using the full-bit directory and no sharers exist beyond
547 // the requestor, then we must set the ack number to all, not one
548 //
549 fwd_set := cache_entry.Sharers;
550 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
551 if (fwd_set.count() > 0) {
552 tbe.Acks := 1;
553 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
554 tbe.SilentAcks := tbe.SilentAcks - 1;
555 } else {
556 tbe.Acks := machineCount(MachineType:L1Cache);
557 tbe.SilentAcks := 0;
558 }
559 } else {
560 tbe.Acks := 1;
561 }
562 }
563 }
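  //
  // Worked example (hypothetical numbers): with 4 L1 caches and one sharer
  // besides the requestor, fwd_set is non-empty, so Acks := 1 and
  // SilentAcks := 4 - 1 - 1 = 2 (the caches that are neither requestor nor
  // sharer ack silently).  If the requestor was the only sharer, fwd_set is
  // empty, so Acks := 4 and SilentAcks := 0.
  //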
564
565 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
566 assert(is_valid(tbe));
567 if (probe_filter_enabled || full_bit_dir_enabled) {
568 tbe.Acks := machineCount(MachineType:L1Cache);
569 tbe.SilentAcks := 0;
570 } else {
571 tbe.Acks := 1;
572 }
573 }
574
575 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
576 peek(responseToDir_in, ResponseMsg) {
577 assert(is_valid(tbe));
578 assert(in_msg.Acks > 0);
579 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
580 //
581 // Note that cache data responses will have an ack count of 2. However,
582 // directory DMA requests must wait for acks from all LLC caches, so
583 // only decrement by 1.
584 //
585 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
586 (in_msg.Type == CoherenceResponseType:DATA) ||
587 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
588 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
589 } else {
590 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
591 }
592 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
593 }
594 }
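  //
  // Example (hypothetical): for a DMA-triggered probe with 3 L1 caches,
  // NumPendingMsgs starts at 3; a DATA_EXCLUSIVE response decrements it by 1
  // (even though the message carries an ack count of 2), and each plain ACK
  // decrements it by its Acks count, so the count reaches 0 only after every
  // cache has responded.
  //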
595
596 action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
597 peek(unblockNetwork_in, ResponseMsg) {
598 assert(is_valid(tbe));
599 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
600 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
601 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
602 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
603 }
604 }
605
606 action(n_popResponseQueue, "n", desc="Pop response queue") {
607 responseToDir_in.dequeue();
608 }
609
610 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
611 assert(is_valid(tbe));
612 if (tbe.NumPendingMsgs == 0) {
613 enqueue(triggerQueue_out, TriggerMsg) {
614 out_msg.Address := address;
615 if (tbe.Sharers) {
616 if (tbe.Owned) {
617 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
618 } else {
619 out_msg.Type := TriggerType:ALL_ACKS;
620 }
621 } else {
622 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
623 }
624 }
625 }
626 }
627
628 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
629 assert(is_valid(tbe));
630 if (tbe.NumPendingMsgs == 0) {
631 enqueue(triggerQueue_out, TriggerMsg) {
632 out_msg.Address := address;
633 out_msg.Type := TriggerType:ALL_UNBLOCKS;
634 }
635 }
636 }
637
638 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
639 assert(is_valid(tbe));
640 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
641 }
642
643 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
644 if (probe_filter_enabled || full_bit_dir_enabled) {
645 assert(is_valid(tbe));
646 tbe.NumPendingMsgs := 0;
647 }
648 }
649
650 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
651 assert(is_valid(tbe));
652 if (tbe.NumPendingMsgs == 0) {
653 assert(probe_filter_enabled || full_bit_dir_enabled);
654 enqueue(triggerQueue_out, TriggerMsg) {
655 out_msg.Address := address;
656 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
657 }
658 }
659 }
660
661 action(d_sendData, "d", desc="Send data to requestor") {
662 peek(memQueue_in, MemoryMsg) {
663 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
664 assert(is_valid(tbe));
665 out_msg.Address := address;
666 out_msg.Type := tbe.ResponseType;
667 out_msg.Sender := machineID;
668 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
669 out_msg.DataBlk := in_msg.DataBlk;
670 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
671 out_msg.Dirty := false; // By definition, the block is now clean
672 out_msg.Acks := tbe.Acks;
673 out_msg.SilentAcks := tbe.SilentAcks;
674 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
675 assert(out_msg.Acks > 0);
676 out_msg.MessageSize := MessageSizeType:Response_Data;
677 }
678 }
679 }
680
681 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
682 peek(memQueue_in, MemoryMsg) {
683 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
684 assert(is_valid(tbe));
685 out_msg.PhysicalAddress := address;
686 out_msg.LineAddress := address;
687 out_msg.Type := DMAResponseType:DATA;
688 //
689 // we send the entire data block and rely on the dma controller to
690 // split it up if need be
691 //
692 out_msg.DataBlk := in_msg.DataBlk;
693 out_msg.Destination.add(tbe.DmaRequestor);
694 out_msg.MessageSize := MessageSizeType:Response_Data;
695 }
696 }
697 }
698
699 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
700 peek(triggerQueue_in, TriggerMsg) {
701 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
702 assert(is_valid(tbe));
703 out_msg.PhysicalAddress := address;
704 out_msg.LineAddress := address;
705 out_msg.Type := DMAResponseType:DATA;
706 //
707 // we send the entire data block and rely on the dma controller to
708 // split it up if need be
709 //
710 out_msg.DataBlk := tbe.DataBlk;
711 out_msg.Destination.add(tbe.DmaRequestor);
712 out_msg.MessageSize := MessageSizeType:Response_Data;
713 }
714 }
715 }
716
717 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
718 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
719 assert(is_valid(tbe));
720 out_msg.PhysicalAddress := address;
721 out_msg.LineAddress := address;
722 out_msg.Type := DMAResponseType:ACK;
723 out_msg.Destination.add(tbe.DmaRequestor);
724 out_msg.MessageSize := MessageSizeType:Writeback_Control;
725 }
726 }
727
728 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
729 peek(requestQueue_in, RequestMsg) {
730 assert(is_valid(tbe));
731 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
732 }
733 }
734
735 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
736 peek(requestQueue_in, RequestMsg) {
737 assert(is_valid(tbe));
738 if (full_bit_dir_enabled) {
739 fwd_set := cache_entry.Sharers;
740 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
741 if (fwd_set.count() > 0) {
742 tbe.ResponseType := CoherenceResponseType:DATA;
743 } else {
744 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
745 }
746 } else {
747 tbe.ResponseType := CoherenceResponseType:DATA;
748 }
749 }
750 }
751
752 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
753 peek(requestQueue_in, RequestMsg) {
754 assert(is_valid(tbe));
755 tbe.GetSRequestors.add(in_msg.Requestor);
756 }
757 }
758
759 action(r_setSharerBit, "r", desc="We saw other sharers") {
760 assert(is_valid(tbe));
761 tbe.Sharers := true;
762 }
763
764 action(so_setOwnerBit, "so", desc="We saw the owner") {
765 assert(is_valid(tbe));
766 tbe.Sharers := true;
767 tbe.Owned := true;
768 }
769
770 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
771 peek(requestQueue_in, RequestMsg) {
772 enqueue(memQueue_out, MemoryMsg, latency="1") {
773 out_msg.Address := address;
774 out_msg.Type := MemoryRequestType:MEMORY_READ;
775 out_msg.Sender := machineID;
776 out_msg.OriginalRequestorMachId := in_msg.Requestor;
777 out_msg.MessageSize := in_msg.MessageSize;
778 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
779 DPRINTF(RubySlicc, "%s\n", out_msg);
780 }
781 }
782 }
783
784 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
785 peek(dmaRequestQueue_in, DMARequestMsg) {
786 enqueue(memQueue_out, MemoryMsg, latency="1") {
787 out_msg.Address := address;
788 out_msg.Type := MemoryRequestType:MEMORY_READ;
789 out_msg.Sender := machineID;
790 out_msg.OriginalRequestorMachId := in_msg.Requestor;
791 out_msg.MessageSize := in_msg.MessageSize;
792 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
793 DPRINTF(RubySlicc, "%s\n", out_msg);
794 }
795 }
796 }
797
798 action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
799 assert(is_valid(tbe));
800 if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
801 if (full_bit_dir_enabled) {
802 assert(is_valid(cache_entry));
803 peek(requestQueue_in, RequestMsg) {
804 fwd_set := cache_entry.Sharers;
805 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
806 if (fwd_set.count() > 0) {
807 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
808 out_msg.Address := address;
809 out_msg.Type := in_msg.Type;
810 out_msg.Requestor := in_msg.Requestor;
811 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
812 out_msg.MessageSize := MessageSizeType:Multicast_Control;
813 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
814 out_msg.ForwardRequestTime := get_time();
815 assert(tbe.SilentAcks > 0);
816 out_msg.SilentAcks := tbe.SilentAcks;
817 }
818 }
819 }
820 } else {
821 peek(requestQueue_in, RequestMsg) {
822 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
823 out_msg.Address := address;
824 out_msg.Type := in_msg.Type;
825 out_msg.Requestor := in_msg.Requestor;
826 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
827 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
828 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
829 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
830 out_msg.ForwardRequestTime := get_time();
831 }
832 }
833 }
834 }
835 }
836
837 action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
838 if (machineCount(MachineType:L1Cache) > 1) {
839 if (full_bit_dir_enabled) {
840 assert(cache_entry.Sharers.count() > 0);
841 peek(requestQueue_in, RequestMsg) {
842 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
843 out_msg.Address := address;
844 out_msg.Type := CoherenceRequestType:INV;
845 out_msg.Requestor := machineID;
846 out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
847 out_msg.MessageSize := MessageSizeType:Multicast_Control;
848 }
849 }
850 } else {
851 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
852 out_msg.Address := address;
853 out_msg.Type := CoherenceRequestType:INV;
854 out_msg.Requestor := machineID;
855 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
856 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
857 }
858 }
859 }
860 }
861
862 action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
863 if (machineCount(MachineType:L1Cache) > 1) {
864 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
865 assert(is_valid(cache_entry));
866 out_msg.Address := address;
867 out_msg.Type := CoherenceRequestType:INV;
868 out_msg.Requestor := machineID;
869 out_msg.Destination.add(cache_entry.Owner);
870 out_msg.MessageSize := MessageSizeType:Request_Control;
871 out_msg.DirectedProbe := true;
872 }
873 }
874 }
875
876 action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
877 if (machineCount(MachineType:L1Cache) > 1) {
878 peek(requestQueue_in, RequestMsg) {
879 if (full_bit_dir_enabled) {
880 fwd_set := cache_entry.Sharers;
881 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
882 if (fwd_set.count() > 0) {
883 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
884 out_msg.Address := address;
885 out_msg.Type := in_msg.Type;
886 out_msg.Requestor := in_msg.Requestor;
887 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
888 out_msg.MessageSize := MessageSizeType:Multicast_Control;
889 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
890 out_msg.ForwardRequestTime := get_time();
891 out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
892 out_msg.SilentAcks := out_msg.SilentAcks - 1;
893 }
894 }
895 } else {
896 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
897 out_msg.Address := address;
898 out_msg.Type := in_msg.Type;
899 out_msg.Requestor := in_msg.Requestor;
900 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
901 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
902 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
903 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
904 out_msg.ForwardRequestTime := get_time();
905 }
906 }
907 }
908 }
909 }
910
911 action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
912 assert(machineCount(MachineType:L1Cache) > 1);
913 //
914 // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
915 // decouple the two.
916 //
917 peek(unblockNetwork_in, ResponseMsg) {
918 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
919 assert(is_valid(tbe));
920 out_msg.Address := address;
921 out_msg.Type := CoherenceRequestType:MERGED_GETS;
922 out_msg.MergedRequestors := tbe.GetSRequestors;
923 if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
924 out_msg.Destination.add(in_msg.CurOwner);
925 } else {
926 out_msg.Destination.add(in_msg.Sender);
927 }
928 out_msg.MessageSize := MessageSizeType:Request_Control;
929 out_msg.InitialRequestTime := zero_time();
930 out_msg.ForwardRequestTime := get_time();
931 }
932 }
933 }
934
935 action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
936 assert(machineCount(MachineType:L1Cache) > 1);
937 if (probe_filter_enabled || full_bit_dir_enabled) {
938 peek(requestQueue_in, RequestMsg) {
939 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
940 assert(is_valid(cache_entry));
941 out_msg.Address := address;
942 out_msg.Type := in_msg.Type;
943 out_msg.Requestor := in_msg.Requestor;
944 out_msg.Destination.add(cache_entry.Owner);
945 out_msg.MessageSize := MessageSizeType:Request_Control;
946 out_msg.DirectedProbe := true;
947 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
948 out_msg.ForwardRequestTime := get_time();
949 }
950 }
951 } else {
952 peek(requestQueue_in, RequestMsg) {
953 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
954 out_msg.Address := address;
955 out_msg.Type := in_msg.Type;
956 out_msg.Requestor := in_msg.Requestor;
957 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
958 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
959 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
960 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
961 out_msg.ForwardRequestTime := get_time();
962 }
963 }
964 }
965 }
966
967 action(f_forwardWriteFromDma, "fw", desc="Forward DMA write as a GETX to all caches") {
968 assert(is_valid(tbe));
969 if (tbe.NumPendingMsgs > 0) {
970 peek(dmaRequestQueue_in, DMARequestMsg) {
971 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
972 out_msg.Address := address;
973 out_msg.Type := CoherenceRequestType:GETX;
974 //
975 // Send to all L1 caches, since the requestor is the memory controller
976 // itself
977 //
978 out_msg.Requestor := machineID;
979 out_msg.Destination.broadcast(MachineType:L1Cache);
980 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
981 }
982 }
983 }
984 }
985
986 action(f_forwardReadFromDma, "fr", desc="Forward DMA read as a GETS to all caches") {
987 assert(is_valid(tbe));
988 if (tbe.NumPendingMsgs > 0) {
989 peek(dmaRequestQueue_in, DMARequestMsg) {
990 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
991 out_msg.Address := address;
992 out_msg.Type := CoherenceRequestType:GETS;
993 //
994 // Send to all L1 caches, since the requestor is the memory controller
995 // itself
996 //
997 out_msg.Requestor := machineID;
998 out_msg.Destination.broadcast(MachineType:L1Cache);
999 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1000 }
1001 }
1002 }
1003 }
1004
1005 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
1006 requestQueue_in.dequeue();
1007 }
1008
1009 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
1010 peek(unblockNetwork_in, ResponseMsg) {
1011 APPEND_TRANSITION_COMMENT(in_msg.Sender);
1012 }
1013 unblockNetwork_in.dequeue();
1014 }
1015
1016 action(k_wakeUpDependents, "k", desc="wake-up dependents") {
1017 wakeUpBuffers(address);
1018 }
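  //
  // With the wake_up_dependents keyword removed from SLICC (see the commit
  // summary above), dependents are woken explicitly through the
  // wakeUpBuffers() function declared earlier, which re-enqueues messages
  // parked by stall_and_wait for this address.
  //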
1019
1020 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
1021 memQueue_in.dequeue();
1022 }
1023
1024 action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
1025 triggerQueue_in.dequeue();
1026 }
1027
1028 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
1029 dmaRequestQueue_in.dequeue();
1030 }
1031
1032 action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait on the dma request queue") {
1033 peek(dmaRequestQueue_in, DMARequestMsg) {
1034 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1035 }
1036 stall_and_wait(dmaRequestQueue_in, address);
1037 }
1038
1039 action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
1040 peek(memQueue_in, MemoryMsg) {
1041 assert(is_valid(tbe));
1042 if (tbe.CacheDirty == false) {
1043 tbe.DataBlk := in_msg.DataBlk;
1044 }
1045 }
1046 }
1047
1048 action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
1049 peek(responseToDir_in, ResponseMsg) {
1050 assert(is_valid(tbe));
1051 tbe.CacheDirty := true;
1052 tbe.DataBlk := in_msg.DataBlk;
1053 }
1054 }
1055
1056 action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
1057 peek(responseToDir_in, ResponseMsg) {
1058 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1059 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1060 in_msg.Address, in_msg.DataBlk);
1061 }
1062 }
1063
1064 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
1065 peek(unblockNetwork_in, ResponseMsg) {
1066 assert(in_msg.Dirty);
1067 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
1068 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1069 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1070 in_msg.Address, in_msg.DataBlk);
1071 }
1072 }
1073
1074 action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
1075 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1076 assert(is_valid(tbe));
1077 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1078 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1079 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1080 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1081 }
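  //
  // copyPartial overlays only the bytes covered by the DMA request: for a
  // hypothetical 8-byte DMA write at offset 16, bytes 16-23 of the block are
  // replaced with tbe.DmaDataBlk while the rest keep the tbe.DataBlk contents
  // written just above.
  //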
1082
1083 action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
1084 assert(is_valid(tbe));
1085 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1086 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1087 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1088 }
1089
1090 action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
1091 assert(is_valid(tbe));
1092 assert(tbe.CacheDirty);
1093 }
1094
1095 action(ano_assertNotOwner, "ano", desc="Assert that the requestor is not the current owner") {
1096 if (probe_filter_enabled || full_bit_dir_enabled) {
1097 peek(requestQueue_in, RequestMsg) {
1098 assert(is_valid(cache_entry));
1099 assert(cache_entry.Owner != in_msg.Requestor);
1100 }
1101 }
1102 }
1103
1104 action(ans_assertNotSharer, "ans", desc="Assert that the requestor is not a current sharer") {
1105 if (full_bit_dir_enabled) {
1106 peek(requestQueue_in, RequestMsg) {
1107 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
1108 }
1109 }
1110 }
1111
1112 action(rs_removeSharer, "s", desc="remove current sharer") {
1113 if (full_bit_dir_enabled) {
1114 peek(unblockNetwork_in, ResponseMsg) {
1115 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
1116 cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
1117 }
1118 }
1119 }
1120
1121 action(cs_clearSharers, "cs", desc="clear current sharers") {
1122 if (full_bit_dir_enabled) {
1123 peek(requestQueue_in, RequestMsg) {
1124 cache_entry.Sharers.clear();
1125 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
1126 }
1127 }
1128 }
1129
1130 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
1131 peek(unblockNetwork_in, ResponseMsg) {
1132 enqueue(memQueue_out, MemoryMsg, latency="1") {
1133 out_msg.Address := address;
1134 out_msg.Type := MemoryRequestType:MEMORY_WB;
1135 DPRINTF(RubySlicc, "%s\n", out_msg);
1136 }
1137 }
1138 }
1139
1140 action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
1141 enqueue(memQueue_out, MemoryMsg, latency="1") {
1142 assert(is_valid(tbe));
1143 out_msg.Address := address;
1144 out_msg.Type := MemoryRequestType:MEMORY_WB;
1145 // first, initialize the data blk to the current version of system memory
1146 out_msg.DataBlk := tbe.DataBlk;
1147 // then add the dma write data
1148 out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1149 DPRINTF(RubySlicc, "%s\n", out_msg);
1150 }
1151 }
1152
1153 action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
1154 peek(unblockNetwork_in, ResponseMsg) {
1155 assert(in_msg.Dirty == false);
1156 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
1157
1158 // NOTE: The following check would not be valid in a real
1159 // implementation. We include the data in the "dataless"
1160 // message so we can assert the clean data matches the datablock
1161 // in memory
1162 assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
1163 }
1164 }
1165
1166 action(z_stallAndWaitRequest, "z", desc="Stall and wait on the request queue") {
1167 peek(requestQueue_in, RequestMsg) {
1168 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1169 }
1170 stall_and_wait(requestQueue_in, address);
1171 }
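  //
  // Requests stalled here (and DMA requests stalled by zd above) stay parked
  // on a per-address wait list until k_wakeUpDependents calls wakeUpBuffers()
  // for that address in a later transition.
  //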
1172
1173 // TRANSITIONS
1174
1175 // Transitions out of E state
1176 transition(E, GETX, NO_B_W) {
1177 pfa_probeFilterAllocate;
1178 v_allocateTBE;
1179 rx_recordExclusiveInTBE;
1180 saa_setAcksToAllIfPF;
1181 qf_queueMemoryFetchRequest;
1182 fn_forwardRequestIfNecessary;
1183 i_popIncomingRequestQueue;
1184 }
1185
1186 transition(E, GETS, NO_B_W) {
1187 pfa_probeFilterAllocate;
1188 v_allocateTBE;
1189 rx_recordExclusiveInTBE;
1190 saa_setAcksToAllIfPF;
1191 qf_queueMemoryFetchRequest;
1192 fn_forwardRequestIfNecessary;
1193 i_popIncomingRequestQueue;
1194 }
1195
1196 transition(E, DMA_READ, NO_DR_B_W) {
1197 vd_allocateDmaRequestInTBE;
1198 qd_queueMemoryRequestFromDmaRead;
1199 spa_setPendingAcksToZeroIfPF;
1200 f_forwardReadFromDma;
1201 p_popDmaRequestQueue;
1202 }
1203
1204 transition(E, DMA_WRITE, NO_DW_B_W) {
1205 vd_allocateDmaRequestInTBE;
1206 spa_setPendingAcksToZeroIfPF;
1207 sc_signalCompletionIfPF;
1208 f_forwardWriteFromDma;
1209 p_popDmaRequestQueue;
1210 }
1211
1212 // Transitions out of O state
1213 transition(O, GETX, NO_B_W) {
1214 r_setMRU;
1215 v_allocateTBE;
1216 r_recordDataInTBE;
1217 sa_setAcksToOne;
1218 qf_queueMemoryFetchRequest;
1219 fb_forwardRequestBcast;
1220 cs_clearSharers;
1221 i_popIncomingRequestQueue;
1222 }
1223
1224 // This transition is dumb: if a shared copy exists on-chip, that copy should
1225 // provide the data, not slow off-chip DRAM. The problem is that the current
1226 // caches don't provide data in the S state.
1227 transition(O, GETS, O_B_W) {
1228 r_setMRU;
1229 v_allocateTBE;
1230 r_recordDataInTBE;
1231 saa_setAcksToAllIfPF;
1232 qf_queueMemoryFetchRequest;
1233 fn_forwardRequestIfNecessary;
1234 i_popIncomingRequestQueue;
1235 }
1236
1237 transition(O, DMA_READ, O_DR_B_W) {
1238 vd_allocateDmaRequestInTBE;
1239 spa_setPendingAcksToZeroIfPF;
1240 qd_queueMemoryRequestFromDmaRead;
1241 f_forwardReadFromDma;
1242 p_popDmaRequestQueue;
1243 }
1244
1245 transition(O, Pf_Replacement, O_R) {
1246 v_allocateTBE;
1247 pa_setPendingMsgsToAll;
1248 ia_invalidateAllRequest;
1249 pfd_probeFilterDeallocate;
1250 }
1251
1252 transition(S, Pf_Replacement, S_R) {
1253 v_allocateTBE;
1254 pa_setPendingMsgsToAll;
1255 ia_invalidateAllRequest;
1256 pfd_probeFilterDeallocate;
1257 }
1258
1259 transition(NO, Pf_Replacement, NO_R) {
1260 v_allocateTBE;
1261 po_setPendingMsgsToOne;
1262 io_invalidateOwnerRequest;
1263 pfd_probeFilterDeallocate;
1264 }
1265
1266 transition(NX, Pf_Replacement, NO_R) {
1267 v_allocateTBE;
1268 pa_setPendingMsgsToAll;
1269 ia_invalidateAllRequest;
1270 pfd_probeFilterDeallocate;
1271 }
1272
1273 transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
1274 vd_allocateDmaRequestInTBE;
1275 f_forwardWriteFromDma;
1276 p_popDmaRequestQueue;
1277 }
1278
1279 // Transitions out of NX and NO states
1280 transition(NX, GETX, NO_B) {
1281 r_setMRU;
1282 fb_forwardRequestBcast;
1283 cs_clearSharers;
1284 i_popIncomingRequestQueue;
1285 }
1286
1287 // Transitions out of NO state
1288 transition(NO, GETX, NO_B) {
1289 r_setMRU;
1290 ano_assertNotOwner;
1291 fc_forwardRequestConditionalOwner;
1292 cs_clearSharers;
1293 i_popIncomingRequestQueue;
1294 }
1295
1296 transition(S, GETX, NO_B) {
1297 r_setMRU;
1298 fb_forwardRequestBcast;
1299 cs_clearSharers;
1300 i_popIncomingRequestQueue;
1301 }
1302
1303 transition(S, GETS, NO_B) {
1304 r_setMRU;
1305 ano_assertNotOwner;
1306 fb_forwardRequestBcast;
1307 i_popIncomingRequestQueue;
1308 }
1309
1310 transition(NO, GETS, NO_B) {
1311 r_setMRU;
1312 ano_assertNotOwner;
1313 ans_assertNotSharer;
1314 fc_forwardRequestConditionalOwner;
1315 i_popIncomingRequestQueue;
1316 }
1317
1318 transition(NX, GETS, NO_B) {
1319 r_setMRU;
1320 ano_assertNotOwner;
1321 fc_forwardRequestConditionalOwner;
1322 i_popIncomingRequestQueue;
1323 }
1324
1325 transition({NO, NX, S}, PUT, WB) {
1326 //
1327 // note that the PUT requestor may not be the current owner if an invalidate
1328 // raced with PUT
1329 //
1330 a_sendWriteBackAck;
1331 i_popIncomingRequestQueue;
1332 }
1333
1334 transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
1335 vd_allocateDmaRequestInTBE;
1336 f_forwardReadFromDma;
1337 p_popDmaRequestQueue;
1338 }
1339
1340 // Nack PUT requests when races cause us to believe we own the data
1341 transition({O, E}, PUT) {
1342 b_sendWriteBackNack;
1343 i_popIncomingRequestQueue;
1344 }
1345
1346 // Blocked transient states
1347 transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1348 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1349 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
1350 {GETS, GETX, PUT, Pf_Replacement}) {
1351 z_stallAndWaitRequest;
1352 }
1353
1354 transition(NO_B, GETX, NO_B_X) {
1355 z_stallAndWaitRequest;
1356 }
1357
1358 transition(NO_B, {PUT, Pf_Replacement}) {
1359 z_stallAndWaitRequest;
1360 }
1361
1362 transition(NO_B_S, {GETX, PUT, Pf_Replacement}) {
1363 z_stallAndWaitRequest;
1364 }
1365
1366 transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1367 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1368 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R},
1369 {DMA_READ, DMA_WRITE}) {
1370 zd_stallAndWaitDMARequest;
1371 }
1372
1373 // merge GETS into one response
1374 transition(NO_B, GETS, NO_B_S) {
1375 v_allocateTBE;
1376 rs_recordGetSRequestor;
1377 i_popIncomingRequestQueue;
1378 }
1379
1380 transition(NO_B_S, GETS) {
1381 rs_recordGetSRequestor;
1382 i_popIncomingRequestQueue;
1383 }
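  //
  // Merged GETS flow: the first GETS seen while blocked moves NO_B to NO_B_S
  // and records the requestor; later GETS requests are simply appended.  When
  // the unblock for the original request arrives, the accumulated requestors
  // are forwarded to the owner as a single MERGED_GETS, and the directory
  // waits in NO_B_S_W for one UnblockS per merged requestor before settling
  // in NX.
  //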
1384
1385 // unblock responses
1386 transition({NO_B, NO_B_X}, UnblockS, NX) {
1387 us_updateSharerIfFBD;
1388 k_wakeUpDependents;
1389 j_popIncomingUnblockQueue;
1390 }
1391
1392 transition({NO_B, NO_B_X}, UnblockM, NO) {
1393 uo_updateOwnerIfPf;
1394 us_updateSharerIfFBD;
1395 k_wakeUpDependents;
1396 j_popIncomingUnblockQueue;
1397 }
1398
1399 transition(NO_B_S, UnblockS, NO_B_S_W) {
1400 us_updateSharerIfFBD;
1401 fr_forwardMergeReadRequestsToOwner;
1402 sp_setPendingMsgsToMergedSharers;
1403 j_popIncomingUnblockQueue;
1404 }
1405
1406 transition(NO_B_S, UnblockM, NO_B_S_W) {
1407 uo_updateOwnerIfPf;
1408 fr_forwardMergeReadRequestsToOwner;
1409 sp_setPendingMsgsToMergedSharers;
1410 j_popIncomingUnblockQueue;
1411 }
1412
1413 transition(NO_B_S_W, UnblockS) {
1414 us_updateSharerIfFBD;
1415 mu_decrementNumberOfUnblocks;
1416 os_checkForMergedGetSCompletion;
1417 j_popIncomingUnblockQueue;
1418 }
1419
1420 transition(NO_B_S_W, All_Unblocks, NX) {
1421 w_deallocateTBE;
1422 k_wakeUpDependents;
1423 g_popTriggerQueue;
1424 }
1425
1426 transition(O_B, UnblockS, O) {
1427 us_updateSharerIfFBD;
1428 k_wakeUpDependents;
1429 j_popIncomingUnblockQueue;
1430 }
1431
1432 transition(O_B, UnblockM, NO) {
1433 us_updateSharerIfFBD;
1434 uo_updateOwnerIfPf;
1435 k_wakeUpDependents;
1436 j_popIncomingUnblockQueue;
1437 }
1438
1439 transition(NO_B_W, Memory_Data, NO_B) {
1440 d_sendData;
1441 w_deallocateTBE;
1442 l_popMemQueue;
1443 }
1444
1445 transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
1446 r_recordMemoryData;
1447 o_checkForCompletion;
1448 l_popMemQueue;
1449 }
1450
1451 transition(O_DR_B_W, Memory_Data, O_DR_B) {
1452 r_recordMemoryData;
1453 dr_sendDmaData;
1454 o_checkForCompletion;
1455 l_popMemQueue;
1456 }
1457
1458 transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
1459 m_decrementNumberOfMessages;
1460 o_checkForCompletion;
1461 n_popResponseQueue;
1462 }
1463
1464 transition({O_R, S_R, NO_R}, Ack) {
1465 m_decrementNumberOfMessages;
1466 o_checkForCompletion;
1467 n_popResponseQueue;
1468 }
1469
1470 transition(S_R, Data) {
1471 wr_writeResponseDataToMemory;
1472 m_decrementNumberOfMessages;
1473 o_checkForCompletion;
1474 n_popResponseQueue;
1475 }
1476
1477 transition(NO_R, {Data, Exclusive_Data}) {
1478 wr_writeResponseDataToMemory;
1479 m_decrementNumberOfMessages;
1480 o_checkForCompletion;
1481 n_popResponseQueue;
1482 }
1483
1484 transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
1485 w_deallocateTBE;
1486 k_wakeUpDependents;
1487 g_popTriggerQueue;
1488 }
1489
1490 transition({NO_DR_B_W, O_DR_B_W}, Ack) {
1491 m_decrementNumberOfMessages;
1492 n_popResponseQueue;
1493 }
1494
1495 transition(NO_DR_B_W, Shared_Ack) {
1496 m_decrementNumberOfMessages;
1497 r_setSharerBit;
1498 n_popResponseQueue;
1499 }
1500
1501 transition(O_DR_B, Shared_Ack) {
1502 m_decrementNumberOfMessages;
1503 so_setOwnerBit;
1504 o_checkForCompletion;
1505 n_popResponseQueue;
1506 }
1507
1508 transition(O_DR_B_W, Shared_Ack) {
1509 m_decrementNumberOfMessages;
1510 so_setOwnerBit;
1511 n_popResponseQueue;
1512 }
1513
1514 transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
1515 m_decrementNumberOfMessages;
1516 r_setSharerBit;
1517 o_checkForCompletion;
1518 n_popResponseQueue;
1519 }
1520
1521 transition(NO_DR_B_W, Shared_Data) {
1522 r_recordCacheData;
1523 m_decrementNumberOfMessages;
1524 so_setOwnerBit;
1525 o_checkForCompletion;
1526 n_popResponseQueue;
1527 }
1528
1529 transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
1530 r_recordCacheData;
1531 m_decrementNumberOfMessages;
1532 so_setOwnerBit;
1533 o_checkForCompletion;
1534 n_popResponseQueue;
1535 }
1536
1537 transition(NO_DR_B_W, {Exclusive_Data, Data}) {
1538 r_recordCacheData;
1539 m_decrementNumberOfMessages;
1540 n_popResponseQueue;
1541 }
1542
1543 transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
1544 r_recordCacheData;
1545 m_decrementNumberOfMessages;
1546 o_checkForCompletion;
1547 n_popResponseQueue;
1548 }
1549
1550 transition(NO_DR_B, All_acks_and_owner_data, O) {
1551 //
1552 // Note that the DMA consistency model allows us to send the DMA device
1553 // a response as soon as we receive valid data and prior to receiving
1554 // all acks. However, to simplify the protocol we wait for all acks.
1555 //
1556 dt_sendDmaDataFromTbe;
1557 wdt_writeDataFromTBE;
1558 w_deallocateTBE;
1559 k_wakeUpDependents;
1560 g_popTriggerQueue;
1561 }
1562
1563 transition(NO_DR_B, All_acks_and_shared_data, S) {
1564 //
1565 // Note that the DMA consistency model allows us to send the DMA device
1566 // a response as soon as we receive valid data and prior to receiving
1567 // all acks. However, to simplify the protocol we wait for all acks.
1568 //
1569 dt_sendDmaDataFromTbe;
1570 wdt_writeDataFromTBE;
1571 w_deallocateTBE;
1572 k_wakeUpDependents;
1573 g_popTriggerQueue;
1574 }
1575
1576 transition(NO_DR_B_D, All_acks_and_owner_data, O) {
1577 //
1578 // Note that the DMA consistency model allows us to send the DMA device
1579 // a response as soon as we receive valid data and prior to receiving
1580 // all acks. However, to simplify the protocol we wait for all acks.
1581 //
1582 dt_sendDmaDataFromTbe;
1583 wdt_writeDataFromTBE;
1584 w_deallocateTBE;
1585 k_wakeUpDependents;
1586 g_popTriggerQueue;
1587 }
1588
1589 transition(NO_DR_B_D, All_acks_and_shared_data, S) {
1590 //
1591 // Note that the DMA consistency model allows us to send the DMA device
1592 // a response as soon as we receive valid data and prior to receiving
1593 // all acks. However, to simplify the protocol we wait for all acks.
1594 //
1595 dt_sendDmaDataFromTbe;
1596 wdt_writeDataFromTBE;
1597 w_deallocateTBE;
1598 k_wakeUpDependents;
1599 g_popTriggerQueue;
1600 }
1601
1602 transition(O_DR_B, All_acks_and_owner_data, O) {
1603 wdt_writeDataFromTBE;
1604 w_deallocateTBE;
1605 k_wakeUpDependents;
1606 g_popTriggerQueue;
1607 }
1608
1609 transition(O_DR_B, All_acks_and_data_no_sharers, E) {
1610 wdt_writeDataFromTBE;
1611 w_deallocateTBE;
1612 pfd_probeFilterDeallocate;
1613 k_wakeUpDependents;
1614 g_popTriggerQueue;
1615 }
1616
1617 transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
1618 //
1619 // Note that the DMA consistency model allows us to send the DMA device
1620 // a response as soon as we receive valid data and prior to receiving
1621 // all acks. However, to simplify the protocol we wait for all acks.
1622 //
1623 dt_sendDmaDataFromTbe;
1624 wdt_writeDataFromTBE;
1625 w_deallocateTBE;
1626 ppfd_possibleProbeFilterDeallocate;
1627 k_wakeUpDependents;
1628 g_popTriggerQueue;
1629 }
1630
1631 transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
1632 a_assertCacheData;
1633 //
1634 // Note that the DMA consistency model allows us to send the DMA device
1635 // a response as soon as we receive valid data and prior to receiving
1636 // all acks. However, to simplify the protocol we wait for all acks.
1637 //
1638 dt_sendDmaDataFromTbe;
1639 wdt_writeDataFromTBE;
1640 w_deallocateTBE;
1641 ppfd_possibleProbeFilterDeallocate;
1642 k_wakeUpDependents;
1643 g_popTriggerQueue;
1644 }
1645
1646 transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
1647 dwt_writeDmaDataFromTBE;
1648 ld_queueMemoryDmaWrite;
1649 g_popTriggerQueue;
1650 }
1651
1652 transition(NO_DW_W, Memory_Ack, E) {
1653 da_sendDmaAck;
1654 w_deallocateTBE;
1655 ppfd_possibleProbeFilterDeallocate;
1656 k_wakeUpDependents;
1657 l_popMemQueue;
1658 }
1659
1660 transition(O_B_W, Memory_Data, O_B) {
1661 d_sendData;
1662 w_deallocateTBE;
1663 l_popMemQueue;
1664 }
1665
1666 transition(NO_B_W, UnblockM, NO_W) {
1667 uo_updateOwnerIfPf;
1668 j_popIncomingUnblockQueue;
1669 }
1670
1671 transition(NO_B_W, UnblockS, NO_W) {
1672 us_updateSharerIfFBD;
1673 j_popIncomingUnblockQueue;
1674 }
1675
1676 transition(O_B_W, UnblockS, O_W) {
1677 us_updateSharerIfFBD;
1678 j_popIncomingUnblockQueue;
1679 }
1680
1681 transition(NO_W, Memory_Data, NO) {
1682 w_deallocateTBE;
1683 k_wakeUpDependents;
1684 l_popMemQueue;
1685 }
1686
1687 transition(O_W, Memory_Data, O) {
1688 w_deallocateTBE;
1689 k_wakeUpDependents;
1690 l_popMemQueue;
1691 }
1692
1693 // WB State Transitions
1694 transition(WB, Writeback_Dirty, WB_O_W) {
1695 l_writeDataToMemory;
1696 rs_removeSharer;
1697 l_queueMemoryWBRequest;
1698 j_popIncomingUnblockQueue;
1699 }
1700
1701 transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
1702 l_writeDataToMemory;
1703 rs_removeSharer;
1704 l_queueMemoryWBRequest;
1705 j_popIncomingUnblockQueue;
1706 }
1707
1708 transition(WB_E_W, Memory_Ack, E) {
1709 pfd_probeFilterDeallocate;
1710 k_wakeUpDependents;
1711 l_popMemQueue;
1712 }
1713
1714 transition(WB_O_W, Memory_Ack, O) {
1715 k_wakeUpDependents;
1716 l_popMemQueue;
1717 }
1718
1719 transition(WB, Writeback_Clean, O) {
1720 ll_checkIncomingWriteback;
1721 rs_removeSharer;
1722 k_wakeUpDependents;
1723 j_popIncomingUnblockQueue;
1724 }
1725
1726 transition(WB, Writeback_Exclusive_Clean, E) {
1727 ll_checkIncomingWriteback;
1728 rs_removeSharer;
1729 pfd_probeFilterDeallocate;
1730 k_wakeUpDependents;
1731 j_popIncomingUnblockQueue;
1732 }
1733
1734 transition(WB, Unblock, NX) {
1735 auno_assertUnblockerNotOwner;
1736 k_wakeUpDependents;
1737 j_popIncomingUnblockQueue;
1738 }
1739 }