mem: Fix guest corruption when caches handle uncacheable accesses
src/mem/protocol/MOESI_hammer-dir.sm (gem5.git)
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory,
38 CacheMemory * probeFilter,
39 MemoryControl * memBuffer,
40 int memory_controller_latency = 2,
41 bool probe_filter_enabled = false,
42 bool full_bit_dir_enabled = false
43 {
44
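  //
  // Configuration note (derived from the parameters above): with both
  // probe_filter_enabled and full_bit_dir_enabled left false, the directory
  // simply broadcasts probes. probe_filter_enabled adds a probe filter that
  // tracks a single owner per entry; full_bit_dir_enabled additionally tracks
  // a per-L1 sharer bit vector (see PfEntry below), allowing directed probes
  // instead of broadcasts.
  //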
45 MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
46 MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
47 //
48 // For a finite buffered network, note that the DMA response network only
49   // works on this relatively low-numbered (lower priority) virtual network
50 // because the trigger queue decouples cache responses from DMA responses.
51 //
52 MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
53
54 MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
55 MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
56 MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
57 MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
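  //
  // Virtual network assignment summary: 0 = DMA requests in, 1 = DMA
  // responses out, 2 = CPU requests in, 3 = forwarded probes out,
  // 4 = responses (both directions), 5 = unblocks in.
  //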
58
59 // STATES
60 state_declaration(State, desc="Directory states", default="Directory_State_E") {
61 // Base states
62 NX, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in O at Owner";
63 NO, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
64 S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
65 O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
66 E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
67
68 O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
69 S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
70 NO_R, AccessPermission:Busy, desc="Was Not Owner or Sharer, replacing probe filter entry";
71
72 NO_B, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked";
73 NO_B_X, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
74 NO_B_S, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
75 NO_B_S_W, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
76 O_B, AccessPermission:Busy, "O^B", desc="Owner, Blocked";
77 NO_B_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
78 O_B_W, AccessPermission:Busy, desc="Owner, Blocked, waiting for Dram";
79 NO_W, AccessPermission:Busy, desc="Not Owner, waiting for Dram";
80 O_W, AccessPermission:Busy, desc="Owner, waiting for Dram";
81 NO_DW_B_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram and cache responses";
82 NO_DR_B_W, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for Dram and cache responses";
83 NO_DR_B_D, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
84 NO_DR_B, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses";
85 NO_DW_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram";
86 O_DR_B_W, AccessPermission:Busy, desc="Owner, Dma Read waiting for Dram and cache responses";
87 O_DR_B, AccessPermission:Busy, desc="Owner, Dma Read waiting for cache responses";
88 WB, AccessPermission:Busy, desc="Blocked on a writeback";
89 WB_O_W, AccessPermission:Busy, desc="Blocked on memory write, will go to O";
90 WB_E_W, AccessPermission:Busy, desc="Blocked on memory write, will go to E";
91
92 NO_F, AccessPermission:Busy, desc="Blocked on a flush";
93 NO_F_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
94 }
95
96 // Events
97 enumeration(Event, desc="Directory events") {
98 GETX, desc="A GETX arrives";
99 GETS, desc="A GETS arrives";
100 PUT, desc="A PUT arrives";
101 Unblock, desc="An unblock message arrives";
102     UnblockS, desc="An unblockS (shared) message arrives";
103     UnblockM, desc="An unblockM (exclusive/modified) message arrives";
104 Writeback_Clean, desc="The final part of a PutX (no data)";
105 Writeback_Dirty, desc="The final part of a PutX (data)";
106 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
107 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
108
109 // Probe filter
110 Pf_Replacement, desc="probe filter replacement";
111
112 // DMA requests
113 DMA_READ, desc="A DMA Read memory request";
114 DMA_WRITE, desc="A DMA Write memory request";
115
116 // Memory Controller
117 Memory_Data, desc="Fetched data from memory arrives";
118 Memory_Ack, desc="Writeback Ack from memory arrives";
119
120 // Cache responses required to handle DMA
121 Ack, desc="Received an ack message";
122 Shared_Ack, desc="Received an ack message, responder has a shared copy";
123 Shared_Data, desc="Received a data message, responder has a shared copy";
124     Data, desc="Received a data message, responder had an owner or exclusive copy and gave it to us";
125 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
126
127 // Triggers
128 All_acks_and_shared_data, desc="Received shared data and message acks";
129     All_acks_and_owner_data, desc="Received owner data and all message acks";
130 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
131 All_Unblocks, desc="Received all unblocks for a merged gets request";
132 GETF, desc="A GETF arrives";
133 PUTF, desc="A PUTF arrives";
134 }
135
136 // TYPES
137
138 // DirectoryEntry
139 structure(Entry, desc="...", interface="AbstractEntry") {
140 State DirectoryState, desc="Directory state";
141 DataBlock DataBlk, desc="data for the block";
142 }
143
144 // ProbeFilterEntry
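  // (The Owner field is maintained whenever an entry exists; the Sharers
  //  vector is only maintained when full_bit_dir_enabled is set.)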
145 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
146 State PfState, desc="Directory state";
147 MachineID Owner, desc="Owner node";
148 DataBlock DataBlk, desc="data for the block";
149 Set Sharers, desc="sharing vector for full bit directory";
150 }
151
152 // TBE entries for DMA requests
153 structure(TBE, desc="TBE entries for outstanding DMA requests") {
154 Address PhysicalAddress, desc="physical address";
155 State TBEState, desc="Transient State";
156 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
157 int Acks, default="0", desc="The number of acks that the waiting response represents";
158 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
159     DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
160 DataBlock DataBlk, desc="The current view of system memory";
161 int Len, desc="...";
162 MachineID DmaRequestor, desc="DMA requestor";
163 NetDest GetSRequestors, desc="GETS merged requestors";
164 int NumPendingMsgs, desc="Number of pending acks/messages";
165 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
166 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
167     bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
168 }
169
170 structure(TBETable, external="yes") {
171 TBE lookup(Address);
172 void allocate(Address);
173 void deallocate(Address);
174 bool isPresent(Address);
175 }
176
177 void set_cache_entry(AbstractCacheEntry b);
178 void unset_cache_entry();
179 void set_tbe(TBE a);
180 void unset_tbe();
181 void wakeUpBuffers(Address a);
182
183 // ** OBJECTS **
184
185 Set fwd_set;
186
187 TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
188
189 Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
190 Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
191
192 if (is_valid(dir_entry)) {
193 return dir_entry;
194 }
195
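    // Not present yet: directory entries are allocated lazily on first
    // access, so every address ends up with a valid entry.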
196 dir_entry := static_cast(Entry, "pointer",
197 directory.allocate(addr, new Entry));
198 return dir_entry;
199 }
200
201 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
202 Entry dir_entry := getDirectoryEntry(addr);
203 if(is_valid(dir_entry)) {
204 return dir_entry.DataBlk;
205 }
206
207 TBE tbe := TBEs[addr];
208 if(is_valid(tbe)) {
209 return tbe.DataBlk;
210 }
211
212 error("Data block missing!");
213 }
214
215 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
216 if (probe_filter_enabled || full_bit_dir_enabled) {
217 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
218 return pfEntry;
219 }
220 return OOD;
221 }
222
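  // While a transaction is in flight the TBE holds the authoritative state;
  // otherwise the (lazily allocated) directory entry does.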
223 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
224 if (is_valid(tbe)) {
225 return tbe.TBEState;
226 } else {
227 if (probe_filter_enabled || full_bit_dir_enabled) {
228 if (is_valid(pf_entry)) {
229 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
230 }
231 }
232 return getDirectoryEntry(addr).DirectoryState;
233 }
234 }
235
236 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
237 if (is_valid(tbe)) {
238 tbe.TBEState := state;
239 }
240 if (probe_filter_enabled || full_bit_dir_enabled) {
241 if (is_valid(pf_entry)) {
242 pf_entry.PfState := state;
243 }
244 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
245 assert(is_valid(pf_entry));
246 }
247 if (state == State:E) {
248 assert(is_valid(pf_entry) == false);
249 }
250 }
251 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
252 state == State:O) {
253 assert(is_valid(tbe) == false);
254 }
255 getDirectoryEntry(addr).DirectoryState := state;
256 }
257
258 AccessPermission getAccessPermission(Address addr) {
259 TBE tbe := TBEs[addr];
260 if(is_valid(tbe)) {
261 return Directory_State_to_permission(tbe.TBEState);
262 }
263
264 if(directory.isPresent(addr)) {
265 return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
266 }
267
268 return AccessPermission:NotPresent;
269 }
270
271 void setAccessPermission(PfEntry pf_entry, Address addr, State state) {
272 getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
273 }
274
275 Event cache_request_to_event(CoherenceRequestType type) {
276 if (type == CoherenceRequestType:GETS) {
277 return Event:GETS;
278 } else if (type == CoherenceRequestType:GETX) {
279 return Event:GETX;
280 } else if (type == CoherenceRequestType:GETF) {
281 return Event:GETF;
282 } else {
283 error("Invalid CoherenceRequestType");
284 }
285 }
286
287 MessageBuffer triggerQueue, ordered="true";
288
289 // ** OUT_PORTS **
290 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
291 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
292 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
293 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
294 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
295
296 //
297 // Memory buffer for memory controller to DIMM communication
298 //
299 out_port(memQueue_out, MemoryMsg, memBuffer);
300
301 // ** IN_PORTS **
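  // The rank values order port service: higher-ranked ports are polled first,
  // so internal triggers, unblocks, and cache responses are handled before
  // new CPU requests and DMA requests.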
302
303 // Trigger Queue
304 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
305 if (triggerQueue_in.isReady()) {
306 peek(triggerQueue_in, TriggerMsg) {
307 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
308 TBE tbe := TBEs[in_msg.Address];
309 if (in_msg.Type == TriggerType:ALL_ACKS) {
310 trigger(Event:All_acks_and_owner_data, in_msg.Address,
311 pf_entry, tbe);
312 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
313 trigger(Event:All_acks_and_shared_data, in_msg.Address,
314 pf_entry, tbe);
315 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
316 trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
317 pf_entry, tbe);
318 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
319 trigger(Event:All_Unblocks, in_msg.Address,
320 pf_entry, tbe);
321 } else {
322 error("Unexpected message");
323 }
324 }
325 }
326 }
327
328 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
329 if (unblockNetwork_in.isReady()) {
330 peek(unblockNetwork_in, ResponseMsg) {
331 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
332 TBE tbe := TBEs[in_msg.Address];
333 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
334 trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
335 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
336 trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
337 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
338 trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
339 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
340 trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
341 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
342 trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
343 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
344 trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
345 pf_entry, tbe);
346 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
347 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
348 pf_entry, tbe);
349 } else {
350 error("Invalid message");
351 }
352 }
353 }
354 }
355
356 // Response Network
357 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
358 if (responseToDir_in.isReady()) {
359 peek(responseToDir_in, ResponseMsg) {
360 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
361 TBE tbe := TBEs[in_msg.Address];
362 if (in_msg.Type == CoherenceResponseType:ACK) {
363 trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
364 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
365 trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
366 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
367 trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
368 } else if (in_msg.Type == CoherenceResponseType:DATA) {
369 trigger(Event:Data, in_msg.Address, pf_entry, tbe);
370 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
371 trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
372 } else {
373 error("Unexpected message");
374 }
375 }
376 }
377 }
378
379 // off-chip memory request/response is done
380 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
381 if (memQueue_in.isReady()) {
382 peek(memQueue_in, MemoryMsg) {
383 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
384 TBE tbe := TBEs[in_msg.Address];
385 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
386 trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
387 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
388 trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
389 } else {
390 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
391 error("Invalid message");
392 }
393 }
394 }
395 }
396
397 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
398 if (requestQueue_in.isReady()) {
399 peek(requestQueue_in, RequestMsg) {
400 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
401 TBE tbe := TBEs[in_msg.Address];
402 if (in_msg.Type == CoherenceRequestType:PUT) {
403 trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
404 } else if (in_msg.Type == CoherenceRequestType:PUTF) {
405 trigger(Event:PUTF, in_msg.Address, pf_entry, tbe);
406 } else {
407 if (probe_filter_enabled || full_bit_dir_enabled) {
408 if (is_valid(pf_entry)) {
409 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
410 pf_entry, tbe);
411 } else {
412 if (probeFilter.cacheAvail(in_msg.Address)) {
413 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
414 pf_entry, tbe);
415 } else {
416 trigger(Event:Pf_Replacement,
417 probeFilter.cacheProbe(in_msg.Address),
418 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
419 TBEs[probeFilter.cacheProbe(in_msg.Address)]);
420 }
421 }
422 } else {
423 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
424 pf_entry, tbe);
425 }
426 }
427 }
428 }
429 }
430
431 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
432 if (dmaRequestQueue_in.isReady()) {
433 peek(dmaRequestQueue_in, DMARequestMsg) {
434 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
435 TBE tbe := TBEs[in_msg.LineAddress];
436 if (in_msg.Type == DMARequestType:READ) {
437 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
438 } else if (in_msg.Type == DMARequestType:WRITE) {
439 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
440 } else {
441 error("Invalid message");
442 }
443 }
444 }
445 }
446
447 // Actions
448
449 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
450 if (probe_filter_enabled || full_bit_dir_enabled) {
451 assert(is_valid(cache_entry));
452 probeFilter.setMRU(address);
453 }
454 }
455
456 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
457 if (probe_filter_enabled || full_bit_dir_enabled) {
458 assert(is_valid(cache_entry));
459 peek(unblockNetwork_in, ResponseMsg) {
460 assert(cache_entry.Owner != in_msg.Sender);
461 if (full_bit_dir_enabled) {
462 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
463 }
464 }
465 }
466 }
467
468 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
469 if (probe_filter_enabled || full_bit_dir_enabled) {
470 assert(is_valid(cache_entry));
471 peek(unblockNetwork_in, ResponseMsg) {
472 cache_entry.Owner := in_msg.Sender;
473 if (full_bit_dir_enabled) {
474 cache_entry.Sharers.clear();
475 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
476 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
477 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
478 }
479 }
480 }
481 }
482
483 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
484 if (full_bit_dir_enabled) {
485 assert(probeFilter.isTagPresent(address));
486 peek(unblockNetwork_in, ResponseMsg) {
487 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
488 }
489 }
490 }
491
492 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
493 peek(requestQueue_in, RequestMsg) {
494 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
495 out_msg.Address := address;
496 out_msg.Type := CoherenceRequestType:WB_ACK;
497 out_msg.Requestor := in_msg.Requestor;
498 out_msg.Destination.add(in_msg.Requestor);
499 out_msg.MessageSize := MessageSizeType:Writeback_Control;
500 }
501 }
502 }
503
504 action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
505 peek(requestQueue_in, RequestMsg) {
506 if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
507 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
508 out_msg.Address := address;
509 out_msg.Type := CoherenceRequestType:BLOCK_ACK;
510 out_msg.Requestor := in_msg.Requestor;
511 out_msg.Destination.add(in_msg.Requestor);
512 out_msg.MessageSize := MessageSizeType:Writeback_Control;
513 }
514 }
515 }
516 }
517
518 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
519 peek(requestQueue_in, RequestMsg) {
520 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
521 out_msg.Address := address;
522 out_msg.Type := CoherenceRequestType:WB_NACK;
523 out_msg.Requestor := in_msg.Requestor;
524 out_msg.Destination.add(in_msg.Requestor);
525 out_msg.MessageSize := MessageSizeType:Writeback_Control;
526 }
527 }
528 }
529
530 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
531 if (probe_filter_enabled || full_bit_dir_enabled) {
532 peek(requestQueue_in, RequestMsg) {
533 set_cache_entry(probeFilter.allocate(address, new PfEntry));
534 cache_entry.Owner := in_msg.Requestor;
535 cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
536 }
537 }
538 }
539
540 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
541 if (probe_filter_enabled || full_bit_dir_enabled) {
542 probeFilter.deallocate(address);
543 unset_cache_entry();
544 }
545 }
546
547   action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if it exists") {
548 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
549 probeFilter.deallocate(address);
550 unset_cache_entry();
551 }
552 }
553
554 action(v_allocateTBE, "v", desc="Allocate TBE") {
555 check_allocate(TBEs);
556 peek(requestQueue_in, RequestMsg) {
557 TBEs.allocate(address);
558 set_tbe(TBEs[address]);
559 tbe.PhysicalAddress := address;
560 tbe.ResponseType := CoherenceResponseType:NULL;
561 }
562 }
563
564   action(vd_allocateDmaRequestInTBE, "vd", desc="Allocate TBE and record DMA request") {
565 check_allocate(TBEs);
566 peek(dmaRequestQueue_in, DMARequestMsg) {
567 TBEs.allocate(address);
568 set_tbe(TBEs[address]);
569 tbe.DmaDataBlk := in_msg.DataBlk;
570 tbe.PhysicalAddress := in_msg.PhysicalAddress;
571 tbe.Len := in_msg.Len;
572 tbe.DmaRequestor := in_msg.Requestor;
573 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
574 //
575 // One ack for each last-level cache
576 //
577 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
578 //
579 // Assume initially that the caches store a clean copy and that memory
580 // will provide the data
581 //
582 tbe.CacheDirty := false;
583 }
584 }
585
586 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
587 assert(is_valid(tbe));
588 if (full_bit_dir_enabled) {
589 assert(is_valid(cache_entry));
590 tbe.NumPendingMsgs := cache_entry.Sharers.count();
591 } else {
592 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
593 }
594 }
595
596 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
597 assert(is_valid(tbe));
598 tbe.NumPendingMsgs := 1;
599 }
600
601 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
602 TBEs.deallocate(address);
603 unset_tbe();
604 }
605
606 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
607 assert(is_valid(tbe));
608 peek(requestQueue_in, RequestMsg) {
609 if (full_bit_dir_enabled) {
610 assert(is_valid(cache_entry));
611 //
612       // If we are using the full-bit directory and no sharers exist beyond
613       // the requestor, then we must set the ack number to all, not one
614 //
615 fwd_set := cache_entry.Sharers;
616 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
617 if (fwd_set.count() > 0) {
618 tbe.Acks := 1;
619 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
620 tbe.SilentAcks := tbe.SilentAcks - 1;
621 } else {
622 tbe.Acks := machineCount(MachineType:L1Cache);
623 tbe.SilentAcks := 0;
624 }
625 } else {
626 tbe.Acks := 1;
627 }
628 }
629 }
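  //
  // Note on ack counting (see the two actions around this point): tbe.Acks is
  // carried in the eventual data response and tells the requestor how many
  // probe responses to expect, while tbe.SilentAcks records caches the
  // full-bit directory knows hold no copy and therefore never probes, so
  // those acks can be accounted for without any response message.
  //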
630
631 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
632 assert(is_valid(tbe));
633 if (probe_filter_enabled || full_bit_dir_enabled) {
634 tbe.Acks := machineCount(MachineType:L1Cache);
635 tbe.SilentAcks := 0;
636 } else {
637 tbe.Acks := 1;
638 }
639 }
640
641 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
642 peek(responseToDir_in, ResponseMsg) {
643 assert(is_valid(tbe));
644 assert(in_msg.Acks > 0);
645 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
646 //
647 // Note that cache data responses will have an ack count of 2. However,
648 // directory DMA requests must wait for acks from all LLC caches, so
649 // only decrement by 1.
650 //
651 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
652 (in_msg.Type == CoherenceResponseType:DATA) ||
653 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
654 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
655 } else {
656 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
657 }
658 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
659 }
660 }
661
662 action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
663 peek(unblockNetwork_in, ResponseMsg) {
664 assert(is_valid(tbe));
665 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
666 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
667 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
668 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
669 }
670 }
671
672 action(n_popResponseQueue, "n", desc="Pop response queue") {
673 responseToDir_in.dequeue();
674 }
675
676 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
677 assert(is_valid(tbe));
678 if (tbe.NumPendingMsgs == 0) {
679 enqueue(triggerQueue_out, TriggerMsg) {
680 out_msg.Address := address;
681 if (tbe.Sharers) {
682 if (tbe.Owned) {
683 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
684 } else {
685 out_msg.Type := TriggerType:ALL_ACKS;
686 }
687 } else {
688 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
689 }
690 }
691 }
692 }
693
694 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
695 assert(is_valid(tbe));
696 if (tbe.NumPendingMsgs == 0) {
697 enqueue(triggerQueue_out, TriggerMsg) {
698 out_msg.Address := address;
699 out_msg.Type := TriggerType:ALL_UNBLOCKS;
700 }
701 }
702 }
703
704 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
705 assert(is_valid(tbe));
706 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
707 }
708
709 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
710 if (probe_filter_enabled || full_bit_dir_enabled) {
711 assert(is_valid(tbe));
712 tbe.NumPendingMsgs := 0;
713 }
714 }
715
716 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
717 assert(is_valid(tbe));
718 if (tbe.NumPendingMsgs == 0) {
719 assert(probe_filter_enabled || full_bit_dir_enabled);
720 enqueue(triggerQueue_out, TriggerMsg) {
721 out_msg.Address := address;
722 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
723 }
724 }
725 }
726
727 action(d_sendData, "d", desc="Send data to requestor") {
728 peek(memQueue_in, MemoryMsg) {
729 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
730 assert(is_valid(tbe));
731 out_msg.Address := address;
732 out_msg.Type := tbe.ResponseType;
733 out_msg.Sender := machineID;
734 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
735 out_msg.DataBlk := in_msg.DataBlk;
736 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
737 out_msg.Dirty := false; // By definition, the block is now clean
738 out_msg.Acks := tbe.Acks;
739 out_msg.SilentAcks := tbe.SilentAcks;
740 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
741 assert(out_msg.Acks > 0);
742 out_msg.MessageSize := MessageSizeType:Response_Data;
743 }
744 }
745 }
746
747 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
748 peek(memQueue_in, MemoryMsg) {
749 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
750 assert(is_valid(tbe));
751 out_msg.PhysicalAddress := address;
752 out_msg.LineAddress := address;
753 out_msg.Type := DMAResponseType:DATA;
754 //
755 // we send the entire data block and rely on the dma controller to
756 // split it up if need be
757 //
758 out_msg.DataBlk := in_msg.DataBlk;
759 out_msg.Destination.add(tbe.DmaRequestor);
760 out_msg.MessageSize := MessageSizeType:Response_Data;
761 }
762 }
763 }
764
765 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
766 peek(triggerQueue_in, TriggerMsg) {
767 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
768 assert(is_valid(tbe));
769 out_msg.PhysicalAddress := address;
770 out_msg.LineAddress := address;
771 out_msg.Type := DMAResponseType:DATA;
772 //
773 // we send the entire data block and rely on the dma controller to
774 // split it up if need be
775 //
776 out_msg.DataBlk := tbe.DataBlk;
777 out_msg.Destination.add(tbe.DmaRequestor);
778 out_msg.MessageSize := MessageSizeType:Response_Data;
779 }
780 }
781 }
782
783 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
784 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
785 assert(is_valid(tbe));
786 out_msg.PhysicalAddress := address;
787 out_msg.LineAddress := address;
788 out_msg.Type := DMAResponseType:ACK;
789 out_msg.Destination.add(tbe.DmaRequestor);
790 out_msg.MessageSize := MessageSizeType:Writeback_Control;
791 }
792 }
793
794 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
795 peek(requestQueue_in, RequestMsg) {
796 assert(is_valid(tbe));
797 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
798 }
799 }
800
801 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
802 peek(requestQueue_in, RequestMsg) {
803 assert(is_valid(tbe));
804 if (full_bit_dir_enabled) {
805 fwd_set := cache_entry.Sharers;
806 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
807 if (fwd_set.count() > 0) {
808 tbe.ResponseType := CoherenceResponseType:DATA;
809 } else {
810 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
811 }
812 } else {
813 tbe.ResponseType := CoherenceResponseType:DATA;
814 }
815 }
816 }
817
818 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
819 peek(requestQueue_in, RequestMsg) {
820 assert(is_valid(tbe));
821 tbe.GetSRequestors.add(in_msg.Requestor);
822 }
823 }
824
825 action(r_setSharerBit, "r", desc="We saw other sharers") {
826 assert(is_valid(tbe));
827 tbe.Sharers := true;
828 }
829
830   action(so_setOwnerBit, "so", desc="We saw the owner") {
831 assert(is_valid(tbe));
832 tbe.Sharers := true;
833 tbe.Owned := true;
834 }
835
836 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
837 peek(requestQueue_in, RequestMsg) {
838 enqueue(memQueue_out, MemoryMsg, latency="1") {
839 out_msg.Address := address;
840 out_msg.Type := MemoryRequestType:MEMORY_READ;
841 out_msg.Sender := machineID;
842 out_msg.OriginalRequestorMachId := in_msg.Requestor;
843 out_msg.MessageSize := in_msg.MessageSize;
844 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
845 DPRINTF(RubySlicc, "%s\n", out_msg);
846 }
847 }
848 }
849
850 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
851 peek(dmaRequestQueue_in, DMARequestMsg) {
852 enqueue(memQueue_out, MemoryMsg, latency="1") {
853 out_msg.Address := address;
854 out_msg.Type := MemoryRequestType:MEMORY_READ;
855 out_msg.Sender := machineID;
856 out_msg.OriginalRequestorMachId := in_msg.Requestor;
857 out_msg.MessageSize := in_msg.MessageSize;
858 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
859 DPRINTF(RubySlicc, "%s\n", out_msg);
860 }
861 }
862 }
863
864 action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
865 assert(is_valid(tbe));
866 if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
867 if (full_bit_dir_enabled) {
868 assert(is_valid(cache_entry));
869 peek(requestQueue_in, RequestMsg) {
870 fwd_set := cache_entry.Sharers;
871 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
872 if (fwd_set.count() > 0) {
873 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
874 out_msg.Address := address;
875 out_msg.Type := in_msg.Type;
876 out_msg.Requestor := in_msg.Requestor;
877 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
878 out_msg.MessageSize := MessageSizeType:Multicast_Control;
879 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
880 out_msg.ForwardRequestTime := get_time();
881 assert(tbe.SilentAcks > 0);
882 out_msg.SilentAcks := tbe.SilentAcks;
883 }
884 }
885 }
886 } else {
887 peek(requestQueue_in, RequestMsg) {
888 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
889 out_msg.Address := address;
890 out_msg.Type := in_msg.Type;
891 out_msg.Requestor := in_msg.Requestor;
892 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
893 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
894 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
895 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
896 out_msg.ForwardRequestTime := get_time();
897 }
898 }
899 }
900 }
901 }
902
903 action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
904 if (machineCount(MachineType:L1Cache) > 1) {
905 if (full_bit_dir_enabled) {
906 assert(cache_entry.Sharers.count() > 0);
907 peek(requestQueue_in, RequestMsg) {
908 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
909 out_msg.Address := address;
910 out_msg.Type := CoherenceRequestType:INV;
911 out_msg.Requestor := machineID;
912 out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
913 out_msg.MessageSize := MessageSizeType:Multicast_Control;
914 }
915 }
916 } else {
917 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
918 out_msg.Address := address;
919 out_msg.Type := CoherenceRequestType:INV;
920 out_msg.Requestor := machineID;
921 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
922 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
923 }
924 }
925 }
926 }
927
928   action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
929 if (machineCount(MachineType:L1Cache) > 1) {
930 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
931 assert(is_valid(cache_entry));
932 out_msg.Address := address;
933 out_msg.Type := CoherenceRequestType:INV;
934 out_msg.Requestor := machineID;
935 out_msg.Destination.add(cache_entry.Owner);
936 out_msg.MessageSize := MessageSizeType:Request_Control;
937 out_msg.DirectedProbe := true;
938 }
939 }
940 }
941
942 action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
943 if (machineCount(MachineType:L1Cache) > 1) {
944 peek(requestQueue_in, RequestMsg) {
945 if (full_bit_dir_enabled) {
946 fwd_set := cache_entry.Sharers;
947 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
948 if (fwd_set.count() > 0) {
949 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
950 out_msg.Address := address;
951 out_msg.Type := in_msg.Type;
952 out_msg.Requestor := in_msg.Requestor;
953 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
954 out_msg.MessageSize := MessageSizeType:Multicast_Control;
955 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
956 out_msg.ForwardRequestTime := get_time();
957 out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
958 out_msg.SilentAcks := out_msg.SilentAcks - 1;
959 }
960 }
961 } else {
962 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
963 out_msg.Address := address;
964 out_msg.Type := in_msg.Type;
965 out_msg.Requestor := in_msg.Requestor;
966 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
967 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
968 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
969 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
970 out_msg.ForwardRequestTime := get_time();
971 }
972 }
973 }
974 } else {
975 peek(requestQueue_in, RequestMsg) {
976 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
977 out_msg.Address := address;
978 out_msg.Type := CoherenceResponseType:ACK;
979 out_msg.Sender := machineID;
980 out_msg.Destination.add(in_msg.Requestor);
981 out_msg.Dirty := false; // By definition, the block is now clean
982 out_msg.Acks := 0;
983 out_msg.SilentAcks := 0;
984 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
985 out_msg.MessageSize := MessageSizeType:Response_Control;
986 }
987 }
988 }
989 }
990
991 action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
992 assert(machineCount(MachineType:L1Cache) > 1);
993 //
994 // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
995 // decouple the two.
996 //
997 peek(unblockNetwork_in, ResponseMsg) {
998 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
999 assert(is_valid(tbe));
1000 out_msg.Address := address;
1001 out_msg.Type := CoherenceRequestType:MERGED_GETS;
1002 out_msg.MergedRequestors := tbe.GetSRequestors;
1003 if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
1004 out_msg.Destination.add(in_msg.CurOwner);
1005 } else {
1006 out_msg.Destination.add(in_msg.Sender);
1007 }
1008 out_msg.MessageSize := MessageSizeType:Request_Control;
1009 out_msg.InitialRequestTime := zero_time();
1010 out_msg.ForwardRequestTime := get_time();
1011 }
1012 }
1013 }
1014
1015 action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
1016 assert(machineCount(MachineType:L1Cache) > 1);
1017 if (probe_filter_enabled || full_bit_dir_enabled) {
1018 peek(requestQueue_in, RequestMsg) {
1019 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1020 assert(is_valid(cache_entry));
1021 out_msg.Address := address;
1022 out_msg.Type := in_msg.Type;
1023 out_msg.Requestor := in_msg.Requestor;
1024 out_msg.Destination.add(cache_entry.Owner);
1025 out_msg.MessageSize := MessageSizeType:Request_Control;
1026 out_msg.DirectedProbe := true;
1027 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1028 out_msg.ForwardRequestTime := get_time();
1029 }
1030 }
1031 } else {
1032 peek(requestQueue_in, RequestMsg) {
1033 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1034 out_msg.Address := address;
1035 out_msg.Type := in_msg.Type;
1036 out_msg.Requestor := in_msg.Requestor;
1037 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1038 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1039 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1040 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1041 out_msg.ForwardRequestTime := get_time();
1042 }
1043 }
1044 }
1045 }
1046
1047 action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
1048 if (machineCount(MachineType:L1Cache) > 1) {
1049
1050 if (probe_filter_enabled || full_bit_dir_enabled) {
1051 peek(requestQueue_in, RequestMsg) {
1052 if (in_msg.Requestor != cache_entry.Owner) {
1053 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1054 assert(is_valid(cache_entry));
1055 out_msg.Address := address;
1056 out_msg.Type := in_msg.Type;
1057 out_msg.Requestor := in_msg.Requestor;
1058 out_msg.Destination.add(cache_entry.Owner);
1059 out_msg.MessageSize := MessageSizeType:Request_Control;
1060 out_msg.DirectedProbe := true;
1061 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1062 out_msg.ForwardRequestTime := get_time();
1063 }
1064 }
1065 }
1066 } else {
1067 peek(requestQueue_in, RequestMsg) {
1068 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1069 out_msg.Address := address;
1070 out_msg.Type := in_msg.Type;
1071 out_msg.Requestor := in_msg.Requestor;
1072 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1073 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1074 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1075 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1076 out_msg.ForwardRequestTime := get_time();
1077 }
1078 }
1079 }
1080 }
1081 }
1082
1083   action(f_forwardWriteFromDma, "fw", desc="Forward DMA write to the caches as a GETX") {
1084 assert(is_valid(tbe));
1085 if (tbe.NumPendingMsgs > 0) {
1086 peek(dmaRequestQueue_in, DMARequestMsg) {
1087 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1088 out_msg.Address := address;
1089 out_msg.Type := CoherenceRequestType:GETX;
1090 //
1091 // Send to all L1 caches, since the requestor is the memory controller
1092 // itself
1093 //
1094 out_msg.Requestor := machineID;
1095 out_msg.Destination.broadcast(MachineType:L1Cache);
1096 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1097 }
1098 }
1099 }
1100 }
1101
1102   action(f_forwardReadFromDma, "fr", desc="Forward DMA read to the caches as a GETS") {
1103 assert(is_valid(tbe));
1104 if (tbe.NumPendingMsgs > 0) {
1105 peek(dmaRequestQueue_in, DMARequestMsg) {
1106 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1107 out_msg.Address := address;
1108 out_msg.Type := CoherenceRequestType:GETS;
1109 //
1110 // Send to all L1 caches, since the requestor is the memory controller
1111 // itself
1112 //
1113 out_msg.Requestor := machineID;
1114 out_msg.Destination.broadcast(MachineType:L1Cache);
1115 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1116 }
1117 }
1118 }
1119 }
1120
1121 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
1122 requestQueue_in.dequeue();
1123 }
1124
1125 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
1126 peek(unblockNetwork_in, ResponseMsg) {
1127 APPEND_TRANSITION_COMMENT(in_msg.Sender);
1128 }
1129 unblockNetwork_in.dequeue();
1130 }
1131
1132 action(k_wakeUpDependents, "k", desc="wake-up dependents") {
1133 wakeUpBuffers(address);
1134 }
1135
1136 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
1137 memQueue_in.dequeue();
1138 }
1139
1140 action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
1141 triggerQueue_in.dequeue();
1142 }
1143
1144 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
1145 dmaRequestQueue_in.dequeue();
1146 }
1147
1148   action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait on the DMA request queue") {
1149 peek(dmaRequestQueue_in, DMARequestMsg) {
1150 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1151 }
1152 stall_and_wait(dmaRequestQueue_in, address);
1153 }
1154
1155 action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
1156 peek(memQueue_in, MemoryMsg) {
1157 assert(is_valid(tbe));
1158 if (tbe.CacheDirty == false) {
1159 tbe.DataBlk := in_msg.DataBlk;
1160 }
1161 }
1162 }
1163
1164 action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
1165 peek(responseToDir_in, ResponseMsg) {
1166 assert(is_valid(tbe));
1167 tbe.CacheDirty := true;
1168 tbe.DataBlk := in_msg.DataBlk;
1169 }
1170 }
1171
1172 action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
1173 peek(responseToDir_in, ResponseMsg) {
1174 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1175 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1176 in_msg.Address, in_msg.DataBlk);
1177 }
1178 }
1179
1180 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
1181 peek(memQueue_in, MemoryMsg) {
1182 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1183 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1184 in_msg.Address, in_msg.DataBlk);
1185 }
1186 }
1187
1188 action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
1189 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1190 assert(is_valid(tbe));
1191 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1192 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1193 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1194 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1195 }
1196
1197   action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
1198 assert(is_valid(tbe));
1199 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1200 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1201 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1202 }
1203
1204 action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
1205 assert(is_valid(tbe));
1206 assert(tbe.CacheDirty);
1207 }
1208
1209 action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
1210 if (probe_filter_enabled || full_bit_dir_enabled) {
1211 peek(requestQueue_in, RequestMsg) {
1212 assert(is_valid(cache_entry));
1213 assert(cache_entry.Owner != in_msg.Requestor);
1214 }
1215 }
1216 }
1217
1218 action(ans_assertNotSharer, "ans", desc="Assert that request is not a current sharer") {
1219 if (full_bit_dir_enabled) {
1220 peek(requestQueue_in, RequestMsg) {
1221 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
1222 }
1223 }
1224 }
1225
1226 action(rs_removeSharer, "s", desc="remove current sharer") {
1227 if (full_bit_dir_enabled) {
1228 peek(unblockNetwork_in, ResponseMsg) {
1229 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
1230 cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
1231 }
1232 }
1233 }
1234
1235 action(cs_clearSharers, "cs", desc="clear current sharers") {
1236 if (full_bit_dir_enabled) {
1237 peek(requestQueue_in, RequestMsg) {
1238 cache_entry.Sharers.clear();
1239 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
1240 }
1241 }
1242 }
1243
1244 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
1245 peek(unblockNetwork_in, ResponseMsg) {
1246 enqueue(memQueue_out, MemoryMsg, latency="1") {
1247 assert(in_msg.Dirty);
1248 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
1249 out_msg.Address := address;
1250 out_msg.Type := MemoryRequestType:MEMORY_WB;
1251 out_msg.DataBlk := in_msg.DataBlk;
1252 DPRINTF(RubySlicc, "%s\n", out_msg);
1253 }
1254 }
1255 }
1256
1257 action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
1258 enqueue(memQueue_out, MemoryMsg, latency="1") {
1259 assert(is_valid(tbe));
1260 out_msg.Address := address;
1261 out_msg.Type := MemoryRequestType:MEMORY_WB;
1262 // first, initialize the data blk to the current version of system memory
1263 out_msg.DataBlk := tbe.DataBlk;
1264 // then add the dma write data
1265 out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1266 DPRINTF(RubySlicc, "%s\n", out_msg);
1267 }
1268 }
1269
1270 action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
1271 peek(unblockNetwork_in, ResponseMsg) {
1272 assert(in_msg.Dirty == false);
1273 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
1274 DPRINTF(RubySlicc, "%s\n", in_msg.DataBlk);
1275 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1276
1277 // NOTE: The following check would not be valid in a real
1278 // implementation. We include the data in the "dataless"
1279 // message so we can assert the clean data matches the datablock
1280 // in memory
1281 assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
1282 }
1283 }
1284
1285 action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") {
1286 peek(requestQueue_in, RequestMsg) {
1287 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1288 }
1289 stall_and_wait(requestQueue_in, address);
1290 }
1291
1292 // TRANSITIONS
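  // Each transition below names the current state, the triggering event, and
  // (optionally) the resulting state; the listed actions execute in order
  // when the transition fires.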
1293
1294 // Transitions out of E state
1295 transition(E, GETX, NO_B_W) {
1296 pfa_probeFilterAllocate;
1297 v_allocateTBE;
1298 rx_recordExclusiveInTBE;
1299 saa_setAcksToAllIfPF;
1300 qf_queueMemoryFetchRequest;
1301 fn_forwardRequestIfNecessary;
1302 i_popIncomingRequestQueue;
1303 }
1304
1305 transition(E, GETF, NO_F_W) {
1306 pfa_probeFilterAllocate;
1307 v_allocateTBE;
1308 rx_recordExclusiveInTBE;
1309 saa_setAcksToAllIfPF;
1310 qf_queueMemoryFetchRequest;
1311 fn_forwardRequestIfNecessary;
1312 i_popIncomingRequestQueue;
1313 }
1314
1315 transition(E, GETS, NO_B_W) {
1316 pfa_probeFilterAllocate;
1317 v_allocateTBE;
1318 rx_recordExclusiveInTBE;
1319 saa_setAcksToAllIfPF;
1320 qf_queueMemoryFetchRequest;
1321 fn_forwardRequestIfNecessary;
1322 i_popIncomingRequestQueue;
1323 }
1324
1325 transition(E, DMA_READ, NO_DR_B_W) {
1326 vd_allocateDmaRequestInTBE;
1327 qd_queueMemoryRequestFromDmaRead;
1328 spa_setPendingAcksToZeroIfPF;
1329 f_forwardReadFromDma;
1330 p_popDmaRequestQueue;
1331 }
1332
1333 transition(E, DMA_WRITE, NO_DW_B_W) {
1334 vd_allocateDmaRequestInTBE;
1335 spa_setPendingAcksToZeroIfPF;
1336 sc_signalCompletionIfPF;
1337 f_forwardWriteFromDma;
1338 p_popDmaRequestQueue;
1339 }
1340
1341 // Transitions out of O state
1342 transition(O, GETX, NO_B_W) {
1343 r_setMRU;
1344 v_allocateTBE;
1345 r_recordDataInTBE;
1346 sa_setAcksToOne;
1347 qf_queueMemoryFetchRequest;
1348 fb_forwardRequestBcast;
1349 cs_clearSharers;
1350 i_popIncomingRequestQueue;
1351 }
1352
1353 transition(O, GETF, NO_F_W) {
1354 r_setMRU;
1355 v_allocateTBE;
1356 r_recordDataInTBE;
1357 sa_setAcksToOne;
1358 qf_queueMemoryFetchRequest;
1359 fb_forwardRequestBcast;
1360 cs_clearSharers;
1361 i_popIncomingRequestQueue;
1362 }
1363
1364   // This transition is inefficient: if a shared copy exists on-chip, that copy
1365   // should provide the data rather than slow off-chip dram. The problem is that
1366   // the current caches don't provide data in the S state
1367 transition(O, GETS, O_B_W) {
1368 r_setMRU;
1369 v_allocateTBE;
1370 r_recordDataInTBE;
1371 saa_setAcksToAllIfPF;
1372 qf_queueMemoryFetchRequest;
1373 fn_forwardRequestIfNecessary;
1374 i_popIncomingRequestQueue;
1375 }
1376
1377 transition(O, DMA_READ, O_DR_B_W) {
1378 vd_allocateDmaRequestInTBE;
1379 spa_setPendingAcksToZeroIfPF;
1380 qd_queueMemoryRequestFromDmaRead;
1381 f_forwardReadFromDma;
1382 p_popDmaRequestQueue;
1383 }
1384
1385 transition(O, Pf_Replacement, O_R) {
1386 v_allocateTBE;
1387 pa_setPendingMsgsToAll;
1388 ia_invalidateAllRequest;
1389 pfd_probeFilterDeallocate;
1390 }
1391
1392 transition(S, Pf_Replacement, S_R) {
1393 v_allocateTBE;
1394 pa_setPendingMsgsToAll;
1395 ia_invalidateAllRequest;
1396 pfd_probeFilterDeallocate;
1397 }
1398
1399 transition(NO, Pf_Replacement, NO_R) {
1400 v_allocateTBE;
1401 po_setPendingMsgsToOne;
1402 io_invalidateOwnerRequest;
1403 pfd_probeFilterDeallocate;
1404 }
1405
1406 transition(NX, Pf_Replacement, NO_R) {
1407 v_allocateTBE;
1408 pa_setPendingMsgsToAll;
1409 ia_invalidateAllRequest;
1410 pfd_probeFilterDeallocate;
1411 }
1412
1413 transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
1414 vd_allocateDmaRequestInTBE;
1415 f_forwardWriteFromDma;
1416 p_popDmaRequestQueue;
1417 }
1418
1419   // Transitions out of NX state
1420 transition(NX, GETX, NO_B) {
1421 r_setMRU;
1422 fb_forwardRequestBcast;
1423 cs_clearSharers;
1424 i_popIncomingRequestQueue;
1425 }
1426
1427 transition(NX, GETF, NO_F) {
1428 r_setMRU;
1429 fb_forwardRequestBcast;
1430 cs_clearSharers;
1431 i_popIncomingRequestQueue;
1432 }
1433
1434 // Transitions out of NO state
1435 transition(NO, GETX, NO_B) {
1436 r_setMRU;
1437 ano_assertNotOwner;
1438 fc_forwardRequestConditionalOwner;
1439 cs_clearSharers;
1440 i_popIncomingRequestQueue;
1441 }
1442
1443 transition(NO, GETF, NO_F) {
1444 r_setMRU;
1445 //ano_assertNotOwner;
1446     nofc_forwardRequestConditionalOwner; // forward request if the requestor is not the owner
1447 cs_clearSharers;
1448 oc_sendBlockAck; // send ack if the owner
1449 i_popIncomingRequestQueue;
1450 }
1451
1452 transition(S, GETX, NO_B) {
1453 r_setMRU;
1454 fb_forwardRequestBcast;
1455 cs_clearSharers;
1456 i_popIncomingRequestQueue;
1457 }
1458
1459 transition(S, GETF, NO_F) {
1460 r_setMRU;
1461 fb_forwardRequestBcast;
1462 cs_clearSharers;
1463 i_popIncomingRequestQueue;
1464 }
1465
1466 transition(S, GETS, NO_B) {
1467 r_setMRU;
1468 ano_assertNotOwner;
1469 fb_forwardRequestBcast;
1470 i_popIncomingRequestQueue;
1471 }
1472
1473 transition(NO, GETS, NO_B) {
1474 r_setMRU;
1475 ano_assertNotOwner;
1476 ans_assertNotSharer;
1477 fc_forwardRequestConditionalOwner;
1478 i_popIncomingRequestQueue;
1479 }
1480
1481 transition(NX, GETS, NO_B) {
1482 r_setMRU;
1483 ano_assertNotOwner;
1484 fc_forwardRequestConditionalOwner;
1485 i_popIncomingRequestQueue;
1486 }
1487
1488 transition({NO, NX, S}, PUT, WB) {
1489 //
1490 // note that the PUT requestor may not be the current owner if an invalidate
1491 // raced with PUT
1492 //
1493 a_sendWriteBackAck;
1494 i_popIncomingRequestQueue;
1495 }
1496
1497 transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
1498 vd_allocateDmaRequestInTBE;
1499 f_forwardReadFromDma;
1500 p_popDmaRequestQueue;
1501 }
1502
1503 // Nack PUT requests when races cause us to believe we own the data
1504 transition({O, E}, PUT) {
1505 b_sendWriteBackNack;
1506 i_popIncomingRequestQueue;
1507 }
1508
1509 // Blocked transient states
1510 transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1511 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1512 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1513 {GETS, GETX, GETF, PUT, Pf_Replacement}) {
1514 z_stallAndWaitRequest;
1515 }
1516
1517 transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}){
1518 z_stallAndWaitRequest;
1519 }
1520
1521 transition(NO_B, {GETX, GETF}, NO_B_X) {
1522 z_stallAndWaitRequest;
1523 }
1524
1525 transition(NO_B, {PUT, Pf_Replacement}) {
1526 z_stallAndWaitRequest;
1527 }
1528
1529 transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
1530 z_stallAndWaitRequest;
1531 }
1532
1533 transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1534 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1535 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1536 {DMA_READ, DMA_WRITE}) {
1537 zd_stallAndWaitDMARequest;
1538 }
1539
1540 // merge GETS into one response
1541 transition(NO_B, GETS, NO_B_S) {
1542 v_allocateTBE;
1543 rs_recordGetSRequestor;
1544 i_popIncomingRequestQueue;
1545 }
1546
1547 transition(NO_B_S, GETS) {
1548 rs_recordGetSRequestor;
1549 i_popIncomingRequestQueue;
1550 }
1551
1552 // unblock responses
1553 transition({NO_B, NO_B_X}, UnblockS, NX) {
1554 us_updateSharerIfFBD;
1555 k_wakeUpDependents;
1556 j_popIncomingUnblockQueue;
1557 }
1558
1559 transition({NO_B, NO_B_X}, UnblockM, NO) {
1560 uo_updateOwnerIfPf;
1561 us_updateSharerIfFBD;
1562 k_wakeUpDependents;
1563 j_popIncomingUnblockQueue;
1564 }
1565
1566 transition(NO_B_S, UnblockS, NO_B_S_W) {
1567 us_updateSharerIfFBD;
1568 fr_forwardMergeReadRequestsToOwner;
1569 sp_setPendingMsgsToMergedSharers;
1570 j_popIncomingUnblockQueue;
1571 }
1572
1573 transition(NO_B_S, UnblockM, NO_B_S_W) {
1574 uo_updateOwnerIfPf;
1575 fr_forwardMergeReadRequestsToOwner;
1576 sp_setPendingMsgsToMergedSharers;
1577 j_popIncomingUnblockQueue;
1578 }
1579
1580 transition(NO_B_S_W, UnblockS) {
1581 us_updateSharerIfFBD;
1582 mu_decrementNumberOfUnblocks;
1583 os_checkForMergedGetSCompletion;
1584 j_popIncomingUnblockQueue;
1585 }
1586
1587 transition(NO_B_S_W, All_Unblocks, NX) {
1588 w_deallocateTBE;
1589 k_wakeUpDependents;
1590 g_popTriggerQueue;
1591 }
1592
1593 transition(O_B, UnblockS, O) {
1594 us_updateSharerIfFBD;
1595 k_wakeUpDependents;
1596 j_popIncomingUnblockQueue;
1597 }
1598
1599 transition(O_B, UnblockM, NO) {
1600 us_updateSharerIfFBD;
1601 uo_updateOwnerIfPf;
1602 k_wakeUpDependents;
1603 j_popIncomingUnblockQueue;
1604 }
1605
1606 transition(NO_B_W, Memory_Data, NO_B) {
1607 d_sendData;
1608 w_deallocateTBE;
1609 l_popMemQueue;
1610 }
1611
1612 transition(NO_F_W, Memory_Data, NO_F) {
1613 d_sendData;
1614 w_deallocateTBE;
1615 l_popMemQueue;
1616 }
1617
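  //
  // DMA read path: memory data is captured in the TBE (r_recordMemoryData);
  // the response to the DMA device is sent either directly from memory
  // (dr_sendDmaData) or from the TBE once cache responses have been collected
  // (dt_sendDmaDataFromTbe).
  //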
1618 transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
1619 r_recordMemoryData;
1620 o_checkForCompletion;
1621 l_popMemQueue;
1622 }
1623
1624 transition(O_DR_B_W, Memory_Data, O_DR_B) {
1625 r_recordMemoryData;
1626 dr_sendDmaData;
1627 o_checkForCompletion;
1628 l_popMemQueue;
1629 }
1630
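  //
  // Ack counting: each response decrements the number of outstanding messages
  // (m_decrementNumberOfMessages) and o_checkForCompletion raises the matching
  // All_acks* trigger once that count reaches zero.
  //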
1631 transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
1632 m_decrementNumberOfMessages;
1633 o_checkForCompletion;
1634 n_popResponseQueue;
1635 }
1636
1637 transition({O_R, S_R, NO_R}, Ack) {
1638 m_decrementNumberOfMessages;
1639 o_checkForCompletion;
1640 n_popResponseQueue;
1641 }
1642
1643 transition(S_R, Data) {
1644 wr_writeResponseDataToMemory;
1645 m_decrementNumberOfMessages;
1646 o_checkForCompletion;
1647 n_popResponseQueue;
1648 }
1649
1650 transition(NO_R, {Data, Exclusive_Data}) {
1651 wr_writeResponseDataToMemory;
1652 m_decrementNumberOfMessages;
1653 o_checkForCompletion;
1654 n_popResponseQueue;
1655 }
1656
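  //
  // All_acks_and_data_no_sharers means no cache kept a copy, so the directory
  // returns to its default E state after any dirty data has been written back.
  //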
1657 transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
1658 w_deallocateTBE;
1659 k_wakeUpDependents;
1660 g_popTriggerQueue;
1661 }
1662
1663 transition({NO_DR_B_W, O_DR_B_W}, Ack) {
1664 m_decrementNumberOfMessages;
1665 n_popResponseQueue;
1666 }
1667
1668 transition(NO_DR_B_W, Shared_Ack) {
1669 m_decrementNumberOfMessages;
1670 r_setSharerBit;
1671 n_popResponseQueue;
1672 }
1673
1674 transition(O_DR_B, Shared_Ack) {
1675 m_decrementNumberOfMessages;
1676 r_setSharerBit;
1677 o_checkForCompletion;
1678 n_popResponseQueue;
1679 }
1680
1681 transition(O_DR_B_W, Shared_Ack) {
1682 m_decrementNumberOfMessages;
1683 r_setSharerBit;
1684 n_popResponseQueue;
1685 }
1686
1687 transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
1688 m_decrementNumberOfMessages;
1689 r_setSharerBit;
1690 o_checkForCompletion;
1691 n_popResponseQueue;
1692 }
1693
1694 transition(NO_DR_B_W, Shared_Data) {
1695 r_recordCacheData;
1696 m_decrementNumberOfMessages;
1697 so_setOwnerBit;
1698 o_checkForCompletion;
1699 n_popResponseQueue;
1700 }
1701
1702 transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
1703 r_recordCacheData;
1704 m_decrementNumberOfMessages;
1705 so_setOwnerBit;
1706 o_checkForCompletion;
1707 n_popResponseQueue;
1708 }
1709
1710 transition(NO_DR_B_W, {Exclusive_Data, Data}) {
1711 r_recordCacheData;
1712 m_decrementNumberOfMessages;
1713 n_popResponseQueue;
1714 }
1715
1716 transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
1717 r_recordCacheData;
1718 m_decrementNumberOfMessages;
1719 o_checkForCompletion;
1720 n_popResponseQueue;
1721 }
1722
1723 transition(NO_DR_B, All_acks_and_owner_data, O) {
1724 //
1725 // Note that the DMA consistency model allows us to send the DMA device
1726 // a response as soon as we receive valid data and prior to receiving
1727 // all acks. However, to simplify the protocol we wait for all acks.
1728 //
1729 dt_sendDmaDataFromTbe;
1730 wdt_writeDataFromTBE;
1731 w_deallocateTBE;
1732 k_wakeUpDependents;
1733 g_popTriggerQueue;
1734 }
1735
1736 transition(NO_DR_B, All_acks_and_shared_data, S) {
1737 //
1738 // Note that the DMA consistency model allows us to send the DMA device
1739 // a response as soon as we receive valid data and prior to receiving
1740 // all acks. However, to simplify the protocol we wait for all acks.
1741 //
1742 dt_sendDmaDataFromTbe;
1743 wdt_writeDataFromTBE;
1744 w_deallocateTBE;
1745 k_wakeUpDependents;
1746 g_popTriggerQueue;
1747 }
1748
1749 transition(NO_DR_B_D, All_acks_and_owner_data, O) {
1750 //
1751 // Note that the DMA consistency model allows us to send the DMA device
1752 // a response as soon as we receive valid data and prior to receiving
1753 // all acks. However, to simplify the protocol we wait for all acks.
1754 //
1755 dt_sendDmaDataFromTbe;
1756 wdt_writeDataFromTBE;
1757 w_deallocateTBE;
1758 k_wakeUpDependents;
1759 g_popTriggerQueue;
1760 }
1761
1762 transition(NO_DR_B_D, All_acks_and_shared_data, S) {
1763 //
1764 // Note that the DMA consistency model allows us to send the DMA device
1765 // a response as soon as we receive valid data and prior to receiving
1766 // all acks. However, to simplify the protocol we wait for all acks.
1767 //
1768 dt_sendDmaDataFromTbe;
1769 wdt_writeDataFromTBE;
1770 w_deallocateTBE;
1771 k_wakeUpDependents;
1772 g_popTriggerQueue;
1773 }
1774
1775 transition(O_DR_B, All_acks_and_owner_data, O) {
1776 wdt_writeDataFromTBE;
1777 w_deallocateTBE;
1778 k_wakeUpDependents;
1779 g_popTriggerQueue;
1780 }
1781
1782 transition(O_DR_B, All_acks_and_data_no_sharers, E) {
1783 wdt_writeDataFromTBE;
1784 w_deallocateTBE;
1785 pfd_probeFilterDeallocate;
1786 k_wakeUpDependents;
1787 g_popTriggerQueue;
1788 }
1789
1790 transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
1791 //
1792 // Note that the DMA consistency model allows us to send the DMA device
1793 // a response as soon as we receive valid data and prior to receiving
1794 // all acks. However, to simplify the protocol we wait for all acks.
1795 //
1796 dt_sendDmaDataFromTbe;
1797 wdt_writeDataFromTBE;
1798 w_deallocateTBE;
1799 ppfd_possibleProbeFilterDeallocate;
1800 k_wakeUpDependents;
1801 g_popTriggerQueue;
1802 }
1803
1804 transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
1805 a_assertCacheData;
1806 //
1807 // Note that the DMA consistency model allows us to send the DMA device
1808 // a response as soon as we receive valid data and prior to receiving
1809 // all acks. However, to simplify the protocol we wait for all acks.
1810 //
1811 dt_sendDmaDataFromTbe;
1812 wdt_writeDataFromTBE;
1813 w_deallocateTBE;
1814 ppfd_possibleProbeFilterDeallocate;
1815 k_wakeUpDependents;
1816 g_popTriggerQueue;
1817 }
1818
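  //
  // DMA write path: once every cache response has arrived, the DMA write data
  // is merged into the TBE (dwt_writeDmaDataFromTBE) and queued to memory
  // (ld_queueMemoryDmaWrite); the ack to the DMA device is only sent after
  // memory acknowledges the write.
  //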
1819 transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
1820 dwt_writeDmaDataFromTBE;
1821 ld_queueMemoryDmaWrite;
1822 g_popTriggerQueue;
1823 }
1824
1825 transition(NO_DW_W, Memory_Ack, E) {
1826 da_sendDmaAck;
1827 w_deallocateTBE;
1828 ppfd_possibleProbeFilterDeallocate;
1829 k_wakeUpDependents;
1830 l_popMemQueue;
1831 }
1832
1833 transition(O_B_W, Memory_Data, O_B) {
1834 d_sendData;
1835 w_deallocateTBE;
1836 l_popMemQueue;
1837 }
1838
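  //
  // If the requester unblocks before the memory response arrives (a cache
  // already supplied the data), the directory moves to NO_W/O_W and simply
  // drops the memory data when it eventually shows up.
  //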
1839 transition(NO_B_W, UnblockM, NO_W) {
1840 uo_updateOwnerIfPf;
1841 j_popIncomingUnblockQueue;
1842 }
1843
1844 transition(NO_B_W, UnblockS, NO_W) {
1845 us_updateSharerIfFBD;
1846 j_popIncomingUnblockQueue;
1847 }
1848
1849 transition(O_B_W, UnblockS, O_W) {
1850 us_updateSharerIfFBD;
1851 j_popIncomingUnblockQueue;
1852 }
1853
1854 transition(NO_W, Memory_Data, NO) {
1855 w_deallocateTBE;
1856 k_wakeUpDependents;
1857 l_popMemQueue;
1858 }
1859
1860 transition(O_W, Memory_Data, O) {
1861 w_deallocateTBE;
1862 k_wakeUpDependents;
1863 l_popMemQueue;
1864 }
1865
1866   // WB State Transitions
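  //
  // After a PUT is acked, the directory waits in WB for the writeback on the
  // unblock network: dirty writebacks are queued to memory, clean writebacks
  // only update sharer/owner tracking, the exclusive variants also free the
  // probe filter entry, and a plain Unblock indicates the writeback was
  // cancelled by a racing request.
  //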
1867 transition(WB, Writeback_Dirty, WB_O_W) {
1868 rs_removeSharer;
1869 l_queueMemoryWBRequest;
1870 j_popIncomingUnblockQueue;
1871 }
1872
1873 transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
1874 rs_removeSharer;
1875 l_queueMemoryWBRequest;
1876 j_popIncomingUnblockQueue;
1877 }
1878
1879 transition(WB_E_W, Memory_Ack, E) {
1880 l_writeDataToMemory;
1881 pfd_probeFilterDeallocate;
1882 k_wakeUpDependents;
1883 l_popMemQueue;
1884 }
1885
1886 transition(WB_O_W, Memory_Ack, O) {
1887 l_writeDataToMemory;
1888 k_wakeUpDependents;
1889 l_popMemQueue;
1890 }
1891
1892 transition(WB, Writeback_Clean, O) {
1893 ll_checkIncomingWriteback;
1894 rs_removeSharer;
1895 k_wakeUpDependents;
1896 j_popIncomingUnblockQueue;
1897 }
1898
1899 transition(WB, Writeback_Exclusive_Clean, E) {
1900 ll_checkIncomingWriteback;
1901 rs_removeSharer;
1902 pfd_probeFilterDeallocate;
1903 k_wakeUpDependents;
1904 j_popIncomingUnblockQueue;
1905 }
1906
1907 transition(WB, Unblock, NX) {
1908 auno_assertUnblockerNotOwner;
1909 k_wakeUpDependents;
1910 j_popIncomingUnblockQueue;
1911 }
1912
1913 transition(NO_F, PUTF, WB) {
1914 a_sendWriteBackAck;
1915 i_popIncomingRequestQueue;
1916 }
1917
1918   // possible race between GETF and UnblockM -- unclear whether this is still needed
1919 transition(NO_F, UnblockM) {
1920 us_updateSharerIfFBD;
1921 uo_updateOwnerIfPf;
1922 j_popIncomingUnblockQueue;
1923 }
1924 }