[gem5.git] / src / mem / protocol / MOESI_hammer-dir.sm
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory,
38 CacheMemory * probeFilter,
39 MemoryControl * memBuffer,
40 int memory_controller_latency = 2,
41 bool probe_filter_enabled = false,
42 bool full_bit_dir_enabled = false
43 {
44
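// Overview (informal): this directory supports three configurations. With both
// options below disabled it behaves as a pure broadcast (Hammer-style)
// directory and every request is forwarded to all L1 caches. With
// probe_filter_enabled, a probe filter entry tracks the current owner so most
// requests can be forwarded as a directed probe. With full_bit_dir_enabled,
// the entry also keeps a full sharer bit vector, allowing multicast forwards
// and "silent" acks for caches known not to hold the block.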
45 MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
46 MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
47 //
48 // For a finite buffered network, note that the DMA response network only
49 // works on this relatively low-numbered (lower-priority) virtual network
50 // because the trigger queue decouples cache responses from DMA responses.
51 //
52 MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
53
54 MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
55 MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
56 MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
57 MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
58
59 // STATES
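// Transient state naming follows a rough convention: a _B suffix means the
// directory is blocked waiting for an unblock, _W means it is waiting on
// DRAM, _R means a probe filter entry is being replaced, and DR/DW mark
// DMA read/write transactions.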
60 state_declaration(State, desc="Directory states", default="Directory_State_E") {
61 // Base states
62 NX, AccessPermission:Invalid, desc="Not Owner, probe filter entry exists, block in O at Owner";
63 NO, AccessPermission:Invalid, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
64 S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
65 O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
66 E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
67
68 O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
69 S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
70 NO_R, AccessPermission:Invalid, desc="Was Not Owner or Sharer, replacing probe filter entry";
71
72 NO_B, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked";
73 NO_B_X, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
74 NO_B_S, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
75 NO_B_S_W, AccessPermission:Invalid, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
76 O_B, AccessPermission:Invalid, "O^B", desc="Owner, Blocked";
77 NO_B_W, AccessPermission:Invalid, desc="Not Owner, Blocked, waiting for Dram";
78 O_B_W, AccessPermission:Invalid, desc="Owner, Blocked, waiting for Dram";
79 NO_W, AccessPermission:Invalid, desc="Not Owner, waiting for Dram";
80 O_W, AccessPermission:Invalid, desc="Owner, waiting for Dram";
81 NO_DW_B_W, AccessPermission:Invalid, desc="Not Owner, Dma Write waiting for Dram and cache responses";
82 NO_DR_B_W, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for Dram and cache responses";
83 NO_DR_B_D, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
84 NO_DR_B, AccessPermission:Invalid, desc="Not Owner, Dma Read waiting for cache responses";
85 NO_DW_W, AccessPermission:Invalid, desc="Not Owner, Dma Write waiting for Dram";
86 O_DR_B_W, AccessPermission:Invalid, desc="Owner, Dma Read waiting for Dram and cache responses";
87 O_DR_B, AccessPermission:Invalid, desc="Owner, Dma Read waiting for cache responses";
88 WB, AccessPermission:Invalid, desc="Blocked on a writeback";
89 WB_O_W, AccessPermission:Invalid, desc="Blocked on memory write, will go to O";
90 WB_E_W, AccessPermission:Invalid, desc="Blocked on memory write, will go to E";
91
92 NO_F, AccessPermission:Invalid, desc="Blocked on a flush";
93 NO_F_W, AccessPermission:Invalid, desc="Not Owner, Blocked, waiting for Dram";
94 }
95
96 // Events
97 enumeration(Event, desc="Directory events") {
98 GETX, desc="A GETX arrives";
99 GETS, desc="A GETS arrives";
100 PUT, desc="A PUT arrives";
101 Unblock, desc="An unblock message arrives";
102 UnblockS, desc="An unblock message arrives, the unblocker is a sharer";
103 UnblockM, desc="An unblock message arrives, the unblocker is the exclusive owner";
104 Writeback_Clean, desc="The final part of a PutX (no data)";
105 Writeback_Dirty, desc="The final part of a PutX (data)";
106 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
107 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
108
109 // Probe filter
110 Pf_Replacement, desc="probe filter replacement";
111
112 // DMA requests
113 DMA_READ, desc="A DMA Read memory request";
114 DMA_WRITE, desc="A DMA Write memory request";
115
116 // Memory Controller
117 Memory_Data, desc="Fetched data from memory arrives";
118 Memory_Ack, desc="Writeback Ack from memory arrives";
119
120 // Cache responses required to handle DMA
121 Ack, desc="Received an ack message";
122 Shared_Ack, desc="Received an ack message, responder has a shared copy";
123 Shared_Data, desc="Received a data message, responder has a shared copy";
124 Data, desc="Received a data message, responder had an owned or exclusive copy, they gave it to us";
125 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
126
127 // Triggers
128 All_acks_and_shared_data, desc="Received shared data and message acks";
129 All_acks_and_owner_data, desc="Received owner data and message acks";
130 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
131 All_Unblocks, desc="Received all unblocks for a merged gets request";
132 GETF, desc="A GETF arrives";
133 PUTF, desc="A PUTF arrives";
134 }
135
136 // TYPES
137
138 // DirectoryEntry
139 structure(Entry, desc="...", interface="AbstractEntry") {
140 State DirectoryState, desc="Directory state";
141 DataBlock DataBlk, desc="data for the block";
142 }
143
144 // ProbeFilterEntry
145 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
146 State PfState, desc="Directory state";
147 MachineID Owner, desc="Owner node";
148 DataBlock DataBlk, desc="data for the block";
149 Set Sharers, desc="sharing vector for full bit directory";
150 }
151
152 // TBE entries for DMA requests
153 structure(TBE, desc="TBE entries for outstanding DMA requests") {
154 Address PhysicalAddress, desc="physical address";
155 State TBEState, desc="Transient State";
156 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
157 int Acks, default="0", desc="The number of acks that the waiting response represents";
158 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
159 DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
160 DataBlock DataBlk, desc="The current view of system memory";
161 int Len, desc="...";
162 MachineID DmaRequestor, desc="DMA requestor";
163 NetDest GetSRequestors, desc="GETS merged requestors";
164 int NumPendingMsgs, desc="Number of pending acks/messages";
165 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
166 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
167 bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
168 }
169
170 structure(TBETable, external="yes") {
171 TBE lookup(Address);
172 void allocate(Address);
173 void deallocate(Address);
174 bool isPresent(Address);
175 }
176
177 void set_cache_entry(AbstractCacheEntry b);
178 void unset_cache_entry();
179 void set_tbe(TBE a);
180 void unset_tbe();
181 void wakeUpBuffers(Address a);
182
183 // ** OBJECTS **
184
185 Set fwd_set;
186
187 TBETable TBEs, template_hack="<Directory_TBE>";
188
189 Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
190 return static_cast(Entry, directory[addr]);
191 }
192
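// Returns the probe filter entry when filtering is enabled. In the pure
// broadcast configuration there is no per-block tracking state, so OOD
// (no valid entry) is returned instead.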
193 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
194 if (probe_filter_enabled || full_bit_dir_enabled) {
195 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
196 return pfEntry;
197 }
198 return OOD;
199 }
200
201 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
202 if (is_valid(tbe)) {
203 return tbe.TBEState;
204 } else {
205 if (probe_filter_enabled || full_bit_dir_enabled) {
206 if (is_valid(pf_entry)) {
207 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
208 }
209 }
210 return getDirectoryEntry(addr).DirectoryState;
211 }
212 }
213
214 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
215 if (is_valid(tbe)) {
216 tbe.TBEState := state;
217 }
218 if (probe_filter_enabled || full_bit_dir_enabled) {
219 if (is_valid(pf_entry)) {
220 pf_entry.PfState := state;
221 }
222 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
223 assert(is_valid(pf_entry));
224 }
225 if (state == State:E) {
226 assert(is_valid(pf_entry) == false);
227 }
228 }
229 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
230 state == State:O) {
231 assert(is_valid(tbe) == false);
232 }
233 getDirectoryEntry(addr).DirectoryState := state;
234 }
235
236 AccessPermission getAccessPermission(Address addr) {
237 TBE tbe := TBEs[addr];
238 if(is_valid(tbe)) {
239 return Directory_State_to_permission(tbe.TBEState);
240 }
241
242 return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
243 }
244
245 void setAccessPermission(PfEntry pf_entry, Address addr, State state) {
246 getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
247 }
248
249 Event cache_request_to_event(CoherenceRequestType type) {
250 if (type == CoherenceRequestType:GETS) {
251 return Event:GETS;
252 } else if (type == CoherenceRequestType:GETX) {
253 return Event:GETX;
254 } else if (type == CoherenceRequestType:GETF) {
255 return Event:GETF;
256 } else {
257 error("Invalid CoherenceRequestType");
258 }
259 }
260
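// Local trigger queue: completion events (all acks received, all unblocks
// received) are enqueued here by o_checkForCompletion,
// os_checkForMergedGetSCompletion, and sc_signalCompletionIfPF, and consumed
// by triggerQueue_in below, which turns them into All_acks_* / All_Unblocks
// events.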
261 MessageBuffer triggerQueue, ordered="true";
262
263 // ** OUT_PORTS **
264 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
265 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
266 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
267 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
268 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
269
270 //
271 // Memory buffer for memory controller to DIMM communication
272 //
273 out_port(memQueue_out, MemoryMsg, memBuffer);
274
275 // ** IN_PORTS **
276
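// The rank values below order in_port service so that completion triggers and
// in-flight unblocks/responses take precedence over new cache requests, which
// in turn take precedence over DMA requests.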
277 // Trigger Queue
278 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
279 if (triggerQueue_in.isReady()) {
280 peek(triggerQueue_in, TriggerMsg) {
281 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
282 TBE tbe := TBEs[in_msg.Address];
283 if (in_msg.Type == TriggerType:ALL_ACKS) {
284 trigger(Event:All_acks_and_owner_data, in_msg.Address,
285 pf_entry, tbe);
286 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
287 trigger(Event:All_acks_and_shared_data, in_msg.Address,
288 pf_entry, tbe);
289 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
290 trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
291 pf_entry, tbe);
292 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
293 trigger(Event:All_Unblocks, in_msg.Address,
294 pf_entry, tbe);
295 } else {
296 error("Unexpected message");
297 }
298 }
299 }
300 }
301
302 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
303 if (unblockNetwork_in.isReady()) {
304 peek(unblockNetwork_in, ResponseMsg) {
305 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
306 TBE tbe := TBEs[in_msg.Address];
307 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
308 trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
309 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
310 trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
311 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
312 trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
313 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
314 trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
315 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
316 trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
317 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
318 trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
319 pf_entry, tbe);
320 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
321 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
322 pf_entry, tbe);
323 } else {
324 error("Invalid message");
325 }
326 }
327 }
328 }
329
330 // Response Network
331 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
332 if (responseToDir_in.isReady()) {
333 peek(responseToDir_in, ResponseMsg) {
334 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
335 TBE tbe := TBEs[in_msg.Address];
336 if (in_msg.Type == CoherenceResponseType:ACK) {
337 trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
338 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
339 trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
340 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
341 trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
342 } else if (in_msg.Type == CoherenceResponseType:DATA) {
343 trigger(Event:Data, in_msg.Address, pf_entry, tbe);
344 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
345 trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
346 } else {
347 error("Unexpected message");
348 }
349 }
350 }
351 }
352
353 // off-chip memory request/response is done
354 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
355 if (memQueue_in.isReady()) {
356 peek(memQueue_in, MemoryMsg) {
357 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
358 TBE tbe := TBEs[in_msg.Address];
359 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
360 trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
361 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
362 trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
363 } else {
364 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
365 error("Invalid message");
366 }
367 }
368 }
369 }
370
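// Cache request queue. When the probe filter is enabled and no entry (or free
// way) exists for the requested block, a Pf_Replacement event is triggered on
// the victim address chosen by cacheProbe(); the original request is left at
// the head of the queue and re-examined once the victim's entry has been
// invalidated and deallocated.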
371 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
372 if (requestQueue_in.isReady()) {
373 peek(requestQueue_in, RequestMsg) {
374 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
375 TBE tbe := TBEs[in_msg.Address];
376 if (in_msg.Type == CoherenceRequestType:PUT) {
377 trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
378 } else if (in_msg.Type == CoherenceRequestType:PUTF) {
379 trigger(Event:PUTF, in_msg.Address, pf_entry, tbe);
380 } else {
381 if (probe_filter_enabled || full_bit_dir_enabled) {
382 if (is_valid(pf_entry)) {
383 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
384 pf_entry, tbe);
385 } else {
386 if (probeFilter.cacheAvail(in_msg.Address)) {
387 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
388 pf_entry, tbe);
389 } else {
390 trigger(Event:Pf_Replacement,
391 probeFilter.cacheProbe(in_msg.Address),
392 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
393 TBEs[probeFilter.cacheProbe(in_msg.Address)]);
394 }
395 }
396 } else {
397 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
398 pf_entry, tbe);
399 }
400 }
401 }
402 }
403 }
404
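// DMA request queue. DMA reads and writes are tracked in a TBE: the directory
// broadcasts GETS/GETX on behalf of the DMA controller, collects cache
// responses, and for writes merges the (possibly partial) DMA data into the
// memory block with copyPartial before writing it back.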
405 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
406 if (dmaRequestQueue_in.isReady()) {
407 peek(dmaRequestQueue_in, DMARequestMsg) {
408 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
409 TBE tbe := TBEs[in_msg.LineAddress];
410 if (in_msg.Type == DMARequestType:READ) {
411 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
412 } else if (in_msg.Type == DMARequestType:WRITE) {
413 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
414 } else {
415 error("Invalid message");
416 }
417 }
418 }
419 }
420
421 // Actions
422
423 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
424 if (probe_filter_enabled || full_bit_dir_enabled) {
425 assert(is_valid(cache_entry));
426 probeFilter.setMRU(address);
427 }
428 }
429
430 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
431 if (probe_filter_enabled || full_bit_dir_enabled) {
432 assert(is_valid(cache_entry));
433 peek(unblockNetwork_in, ResponseMsg) {
434 assert(cache_entry.Owner != in_msg.Sender);
435 if (full_bit_dir_enabled) {
436 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
437 }
438 }
439 }
440 }
441
442 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
443 if (probe_filter_enabled || full_bit_dir_enabled) {
444 assert(is_valid(cache_entry));
445 peek(unblockNetwork_in, ResponseMsg) {
446 cache_entry.Owner := in_msg.Sender;
447 if (full_bit_dir_enabled) {
448 cache_entry.Sharers.clear();
449 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
450 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
451 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
452 }
453 }
454 }
455 }
456
457 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
458 if (full_bit_dir_enabled) {
459 assert(probeFilter.isTagPresent(address));
460 peek(unblockNetwork_in, ResponseMsg) {
461 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
462 }
463 }
464 }
465
466 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
467 peek(requestQueue_in, RequestMsg) {
468 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
469 out_msg.Address := address;
470 out_msg.Type := CoherenceRequestType:WB_ACK;
471 out_msg.Requestor := in_msg.Requestor;
472 out_msg.Destination.add(in_msg.Requestor);
473 out_msg.MessageSize := MessageSizeType:Writeback_Control;
474 }
475 }
476 }
477
478 action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
479 peek(requestQueue_in, RequestMsg) {
480 if ((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) {
481 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
482 out_msg.Address := address;
483 out_msg.Type := CoherenceRequestType:BLOCK_ACK;
484 out_msg.Requestor := in_msg.Requestor;
485 out_msg.Destination.add(in_msg.Requestor);
486 out_msg.MessageSize := MessageSizeType:Writeback_Control;
487 }
488 }
489 }
490 }
491
492 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
493 peek(requestQueue_in, RequestMsg) {
494 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
495 out_msg.Address := address;
496 out_msg.Type := CoherenceRequestType:WB_NACK;
497 out_msg.Requestor := in_msg.Requestor;
498 out_msg.Destination.add(in_msg.Requestor);
499 out_msg.MessageSize := MessageSizeType:Writeback_Control;
500 }
501 }
502 }
503
504 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
505 if (probe_filter_enabled || full_bit_dir_enabled) {
506 peek(requestQueue_in, RequestMsg) {
507 set_cache_entry(probeFilter.allocate(address, new PfEntry));
508 cache_entry.Owner := in_msg.Requestor;
509 cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
510 }
511 }
512 }
513
514 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
515 if (probe_filter_enabled || full_bit_dir_enabled) {
516 probeFilter.deallocate(address);
517 unset_cache_entry();
518 }
519 }
520
521 action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if it exists") {
522 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
523 probeFilter.deallocate(address);
524 unset_cache_entry();
525 }
526 }
527
528 action(v_allocateTBE, "v", desc="Allocate TBE") {
529 peek(requestQueue_in, RequestMsg) {
530 TBEs.allocate(address);
531 set_tbe(TBEs[address]);
532 tbe.PhysicalAddress := address;
533 tbe.ResponseType := CoherenceResponseType:NULL;
534 }
535 }
536
537 action(vd_allocateDmaRequestInTBE, "vd", desc="Allocate TBE and record DMA request data") {
538 peek(dmaRequestQueue_in, DMARequestMsg) {
539 TBEs.allocate(address);
540 set_tbe(TBEs[address]);
541 tbe.DmaDataBlk := in_msg.DataBlk;
542 tbe.PhysicalAddress := in_msg.PhysicalAddress;
543 tbe.Len := in_msg.Len;
544 tbe.DmaRequestor := in_msg.Requestor;
545 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
546 //
547 // One ack for each last-level cache
548 //
549 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
550 //
551 // Assume initially that the caches store a clean copy and that memory
552 // will provide the data
553 //
554 tbe.CacheDirty := false;
555 }
556 }
557
558 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
559 assert(is_valid(tbe));
560 if (full_bit_dir_enabled) {
561 assert(is_valid(cache_entry));
562 tbe.NumPendingMsgs := cache_entry.Sharers.count();
563 } else {
564 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
565 }
566 }
567
568 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
569 assert(is_valid(tbe));
570 tbe.NumPendingMsgs := 1;
571 }
572
573 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
574 TBEs.deallocate(address);
575 unset_tbe();
576 }
577
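// Ack accounting: tbe.Acks is the ack count sent to the requestor along with
// the data, while tbe.SilentAcks counts caches that were never probed (only
// possible with the full-bit directory) and are therefore counted by the
// requestor without an explicit message.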
578 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
579 assert(is_valid(tbe));
580 peek(requestQueue_in, RequestMsg) {
581 if (full_bit_dir_enabled) {
582 assert(is_valid(cache_entry));
583 //
584 // If we are using the full-bit directory and no sharers exist beyond
585 // the requestor, then we must set the ack number to all, not one
586 //
587 fwd_set := cache_entry.Sharers;
588 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
589 if (fwd_set.count() > 0) {
590 tbe.Acks := 1;
591 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
592 tbe.SilentAcks := tbe.SilentAcks - 1;
593 } else {
594 tbe.Acks := machineCount(MachineType:L1Cache);
595 tbe.SilentAcks := 0;
596 }
597 } else {
598 tbe.Acks := 1;
599 }
600 }
601 }
602
603 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
604 assert(is_valid(tbe));
605 if (probe_filter_enabled || full_bit_dir_enabled) {
606 tbe.Acks := machineCount(MachineType:L1Cache);
607 tbe.SilentAcks := 0;
608 } else {
609 tbe.Acks := 1;
610 }
611 }
612
613 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
614 peek(responseToDir_in, ResponseMsg) {
615 assert(is_valid(tbe));
616 assert(in_msg.Acks > 0);
617 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
618 //
619 // Note that cache data responses will have an ack count of 2. However,
620 // directory DMA requests must wait for acks from all LLC caches, so
621 // only decrement by 1.
622 //
623 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
624 (in_msg.Type == CoherenceResponseType:DATA) ||
625 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
626 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
627 } else {
628 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
629 }
630 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
631 }
632 }
633
634 action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
635 peek(unblockNetwork_in, ResponseMsg) {
636 assert(is_valid(tbe));
637 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
638 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
639 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
640 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
641 }
642 }
643
644 action(n_popResponseQueue, "n", desc="Pop response queue") {
645 responseToDir_in.dequeue();
646 }
647
648 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
649 assert(is_valid(tbe));
650 if (tbe.NumPendingMsgs == 0) {
651 enqueue(triggerQueue_out, TriggerMsg) {
652 out_msg.Address := address;
653 if (tbe.Sharers) {
654 if (tbe.Owned) {
655 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
656 } else {
657 out_msg.Type := TriggerType:ALL_ACKS;
658 }
659 } else {
660 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
661 }
662 }
663 }
664 }
665
666 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
667 assert(is_valid(tbe));
668 if (tbe.NumPendingMsgs == 0) {
669 enqueue(triggerQueue_out, TriggerMsg) {
670 out_msg.Address := address;
671 out_msg.Type := TriggerType:ALL_UNBLOCKS;
672 }
673 }
674 }
675
676 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
677 assert(is_valid(tbe));
678 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
679 }
680
681 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
682 if (probe_filter_enabled || full_bit_dir_enabled) {
683 assert(is_valid(tbe));
684 tbe.NumPendingMsgs := 0;
685 }
686 }
687
688 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
689 assert(is_valid(tbe));
690 if (tbe.NumPendingMsgs == 0) {
691 assert(probe_filter_enabled || full_bit_dir_enabled);
692 enqueue(triggerQueue_out, TriggerMsg) {
693 out_msg.Address := address;
694 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
695 }
696 }
697 }
698
699 action(d_sendData, "d", desc="Send data to requestor") {
700 peek(memQueue_in, MemoryMsg) {
701 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
702 assert(is_valid(tbe));
703 out_msg.Address := address;
704 out_msg.Type := tbe.ResponseType;
705 out_msg.Sender := machineID;
706 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
707 out_msg.DataBlk := in_msg.DataBlk;
708 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
709 out_msg.Dirty := false; // By definition, the block is now clean
710 out_msg.Acks := tbe.Acks;
711 out_msg.SilentAcks := tbe.SilentAcks;
712 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
713 assert(out_msg.Acks > 0);
714 out_msg.MessageSize := MessageSizeType:Response_Data;
715 }
716 }
717 }
718
719 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
720 peek(memQueue_in, MemoryMsg) {
721 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
722 assert(is_valid(tbe));
723 out_msg.PhysicalAddress := address;
724 out_msg.LineAddress := address;
725 out_msg.Type := DMAResponseType:DATA;
726 //
727 // we send the entire data block and rely on the dma controller to
728 // split it up if need be
729 //
730 out_msg.DataBlk := in_msg.DataBlk;
731 out_msg.Destination.add(tbe.DmaRequestor);
732 out_msg.MessageSize := MessageSizeType:Response_Data;
733 }
734 }
735 }
736
737 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
738 peek(triggerQueue_in, TriggerMsg) {
739 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
740 assert(is_valid(tbe));
741 out_msg.PhysicalAddress := address;
742 out_msg.LineAddress := address;
743 out_msg.Type := DMAResponseType:DATA;
744 //
745 // we send the entire data block and rely on the dma controller to
746 // split it up if need be
747 //
748 out_msg.DataBlk := tbe.DataBlk;
749 out_msg.Destination.add(tbe.DmaRequestor);
750 out_msg.MessageSize := MessageSizeType:Response_Data;
751 }
752 }
753 }
754
755 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
756 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
757 assert(is_valid(tbe));
758 out_msg.PhysicalAddress := address;
759 out_msg.LineAddress := address;
760 out_msg.Type := DMAResponseType:ACK;
761 out_msg.Destination.add(tbe.DmaRequestor);
762 out_msg.MessageSize := MessageSizeType:Writeback_Control;
763 }
764 }
765
766 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
767 peek(requestQueue_in, RequestMsg) {
768 assert(is_valid(tbe));
769 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
770 }
771 }
772
773 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
774 peek(requestQueue_in, RequestMsg) {
775 assert(is_valid(tbe));
776 if (full_bit_dir_enabled) {
777 fwd_set := cache_entry.Sharers;
778 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
779 if (fwd_set.count() > 0) {
780 tbe.ResponseType := CoherenceResponseType:DATA;
781 } else {
782 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
783 }
784 } else {
785 tbe.ResponseType := CoherenceResponseType:DATA;
786 }
787 }
788 }
789
790 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
791 peek(requestQueue_in, RequestMsg) {
792 assert(is_valid(tbe));
793 tbe.GetSRequestors.add(in_msg.Requestor);
794 }
795 }
796
797 action(r_setSharerBit, "r", desc="We saw other sharers") {
798 assert(is_valid(tbe));
799 tbe.Sharers := true;
800 }
801
802 action(so_setOwnerBit, "so", desc="We saw the owner") {
803 assert(is_valid(tbe));
804 tbe.Sharers := true;
805 tbe.Owned := true;
806 }
807
808 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
809 peek(requestQueue_in, RequestMsg) {
810 enqueue(memQueue_out, MemoryMsg, latency="1") {
811 out_msg.Address := address;
812 out_msg.Type := MemoryRequestType:MEMORY_READ;
813 out_msg.Sender := machineID;
814 out_msg.OriginalRequestorMachId := in_msg.Requestor;
815 out_msg.MessageSize := in_msg.MessageSize;
816 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
817 DPRINTF(RubySlicc, "%s\n", out_msg);
818 }
819 }
820 }
821
822 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request for DMA read") {
823 peek(dmaRequestQueue_in, DMARequestMsg) {
824 enqueue(memQueue_out, MemoryMsg, latency="1") {
825 out_msg.Address := address;
826 out_msg.Type := MemoryRequestType:MEMORY_READ;
827 out_msg.Sender := machineID;
828 out_msg.OriginalRequestorMachId := in_msg.Requestor;
829 out_msg.MessageSize := in_msg.MessageSize;
830 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
831 DPRINTF(RubySlicc, "%s\n", out_msg);
832 }
833 }
834 }
835
836 action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
837 assert(is_valid(tbe));
838 if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
839 if (full_bit_dir_enabled) {
840 assert(is_valid(cache_entry));
841 peek(requestQueue_in, RequestMsg) {
842 fwd_set := cache_entry.Sharers;
843 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
844 if (fwd_set.count() > 0) {
845 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
846 out_msg.Address := address;
847 out_msg.Type := in_msg.Type;
848 out_msg.Requestor := in_msg.Requestor;
849 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
850 out_msg.MessageSize := MessageSizeType:Multicast_Control;
851 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
852 out_msg.ForwardRequestTime := get_time();
853 assert(tbe.SilentAcks > 0);
854 out_msg.SilentAcks := tbe.SilentAcks;
855 }
856 }
857 }
858 } else {
859 peek(requestQueue_in, RequestMsg) {
860 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
861 out_msg.Address := address;
862 out_msg.Type := in_msg.Type;
863 out_msg.Requestor := in_msg.Requestor;
864 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
865 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
866 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
867 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
868 out_msg.ForwardRequestTime := get_time();
869 }
870 }
871 }
872 }
873 }
874
875 action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
876 if (machineCount(MachineType:L1Cache) > 1) {
877 if (full_bit_dir_enabled) {
878 assert(cache_entry.Sharers.count() > 0);
879 peek(requestQueue_in, RequestMsg) {
880 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
881 out_msg.Address := address;
882 out_msg.Type := CoherenceRequestType:INV;
883 out_msg.Requestor := machineID;
884 out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
885 out_msg.MessageSize := MessageSizeType:Multicast_Control;
886 }
887 }
888 } else {
889 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
890 out_msg.Address := address;
891 out_msg.Type := CoherenceRequestType:INV;
892 out_msg.Requestor := machineID;
893 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
894 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
895 }
896 }
897 }
898 }
899
900 action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
901 if (machineCount(MachineType:L1Cache) > 1) {
902 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
903 assert(is_valid(cache_entry));
904 out_msg.Address := address;
905 out_msg.Type := CoherenceRequestType:INV;
906 out_msg.Requestor := machineID;
907 out_msg.Destination.add(cache_entry.Owner);
908 out_msg.MessageSize := MessageSizeType:Request_Control;
909 out_msg.DirectedProbe := true;
910 }
911 }
912 }
913
914 action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
915 if (machineCount(MachineType:L1Cache) > 1) {
916 peek(requestQueue_in, RequestMsg) {
917 if (full_bit_dir_enabled) {
918 fwd_set := cache_entry.Sharers;
919 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
920 if (fwd_set.count() > 0) {
921 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
922 out_msg.Address := address;
923 out_msg.Type := in_msg.Type;
924 out_msg.Requestor := in_msg.Requestor;
925 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
926 out_msg.MessageSize := MessageSizeType:Multicast_Control;
927 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
928 out_msg.ForwardRequestTime := get_time();
929 out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
930 out_msg.SilentAcks := out_msg.SilentAcks - 1;
931 }
932 }
933 } else {
934 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
935 out_msg.Address := address;
936 out_msg.Type := in_msg.Type;
937 out_msg.Requestor := in_msg.Requestor;
938 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
939 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
940 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
941 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
942 out_msg.ForwardRequestTime := get_time();
943 }
944 }
945 }
946 }
947 }
948
949 action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
950 assert(machineCount(MachineType:L1Cache) > 1);
951 //
952 // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
953 // decouple the two.
954 //
955 peek(unblockNetwork_in, ResponseMsg) {
956 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
957 assert(is_valid(tbe));
958 out_msg.Address := address;
959 out_msg.Type := CoherenceRequestType:MERGED_GETS;
960 out_msg.MergedRequestors := tbe.GetSRequestors;
961 if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
962 out_msg.Destination.add(in_msg.CurOwner);
963 } else {
964 out_msg.Destination.add(in_msg.Sender);
965 }
966 out_msg.MessageSize := MessageSizeType:Request_Control;
967 out_msg.InitialRequestTime := zero_time();
968 out_msg.ForwardRequestTime := get_time();
969 }
970 }
971 }
972
973 action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
974 assert(machineCount(MachineType:L1Cache) > 1);
975 if (probe_filter_enabled || full_bit_dir_enabled) {
976 peek(requestQueue_in, RequestMsg) {
977 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
978 assert(is_valid(cache_entry));
979 out_msg.Address := address;
980 out_msg.Type := in_msg.Type;
981 out_msg.Requestor := in_msg.Requestor;
982 out_msg.Destination.add(cache_entry.Owner);
983 out_msg.MessageSize := MessageSizeType:Request_Control;
984 out_msg.DirectedProbe := true;
985 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
986 out_msg.ForwardRequestTime := get_time();
987 }
988 }
989 } else {
990 peek(requestQueue_in, RequestMsg) {
991 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
992 out_msg.Address := address;
993 out_msg.Type := in_msg.Type;
994 out_msg.Requestor := in_msg.Requestor;
995 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
996 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
997 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
998 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
999 out_msg.ForwardRequestTime := get_time();
1000 }
1001 }
1002 }
1003 }
1004
1005 action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
1006 assert(machineCount(MachineType:L1Cache) > 1);
1007
1008 if (probe_filter_enabled || full_bit_dir_enabled) {
1009 peek(requestQueue_in, RequestMsg) {
1010 if (in_msg.Requestor != cache_entry.Owner) {
1011 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1012 assert(is_valid(cache_entry));
1013 out_msg.Address := address;
1014 out_msg.Type := in_msg.Type;
1015 out_msg.Requestor := in_msg.Requestor;
1016 out_msg.Destination.add(cache_entry.Owner);
1017 out_msg.MessageSize := MessageSizeType:Request_Control;
1018 out_msg.DirectedProbe := true;
1019 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1020 out_msg.ForwardRequestTime := get_time();
1021 }
1022 }
1023 }
1024 } else {
1025 peek(requestQueue_in, RequestMsg) {
1026 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1027 out_msg.Address := address;
1028 out_msg.Type := in_msg.Type;
1029 out_msg.Requestor := in_msg.Requestor;
1030 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1031 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1032 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1033 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1034 out_msg.ForwardRequestTime := get_time();
1035 }
1036 }
1037 }
1038
1039 }
1040
1041 action(f_forwardWriteFromDma, "fw", desc="Forward DMA write as GETX to all caches") {
1042 assert(is_valid(tbe));
1043 if (tbe.NumPendingMsgs > 0) {
1044 peek(dmaRequestQueue_in, DMARequestMsg) {
1045 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1046 out_msg.Address := address;
1047 out_msg.Type := CoherenceRequestType:GETX;
1048 //
1049 // Send to all L1 caches, since the requestor is the memory controller
1050 // itself
1051 //
1052 out_msg.Requestor := machineID;
1053 out_msg.Destination.broadcast(MachineType:L1Cache);
1054 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1055 }
1056 }
1057 }
1058 }
1059
1060 action(f_forwardReadFromDma, "fr", desc="Forward DMA read as GETS to all caches") {
1061 assert(is_valid(tbe));
1062 if (tbe.NumPendingMsgs > 0) {
1063 peek(dmaRequestQueue_in, DMARequestMsg) {
1064 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1065 out_msg.Address := address;
1066 out_msg.Type := CoherenceRequestType:GETS;
1067 //
1068 // Send to all L1 caches, since the requestor is the memory controller
1069 // itself
1070 //
1071 out_msg.Requestor := machineID;
1072 out_msg.Destination.broadcast(MachineType:L1Cache);
1073 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1074 }
1075 }
1076 }
1077 }
1078
1079 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
1080 requestQueue_in.dequeue();
1081 }
1082
1083 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
1084 peek(unblockNetwork_in, ResponseMsg) {
1085 APPEND_TRANSITION_COMMENT(in_msg.Sender);
1086 }
1087 unblockNetwork_in.dequeue();
1088 }
1089
1090 action(k_wakeUpDependents, "k", desc="wake-up dependents") {
1091 wakeUpBuffers(address);
1092 }
1093
1094 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
1095 memQueue_in.dequeue();
1096 }
1097
1098 action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
1099 triggerQueue_in.dequeue();
1100 }
1101
1102 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
1103 dmaRequestQueue_in.dequeue();
1104 }
1105
1106 action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
1107 peek(dmaRequestQueue_in, DMARequestMsg) {
1108 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1109 }
1110 stall_and_wait(dmaRequestQueue_in, address);
1111 }
1112
1113 action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
1114 peek(memQueue_in, MemoryMsg) {
1115 assert(is_valid(tbe));
1116 if (tbe.CacheDirty == false) {
1117 tbe.DataBlk := in_msg.DataBlk;
1118 }
1119 }
1120 }
1121
1122 action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
1123 peek(responseToDir_in, ResponseMsg) {
1124 assert(is_valid(tbe));
1125 tbe.CacheDirty := true;
1126 tbe.DataBlk := in_msg.DataBlk;
1127 }
1128 }
1129
1130 action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
1131 peek(responseToDir_in, ResponseMsg) {
1132 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1133 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1134 in_msg.Address, in_msg.DataBlk);
1135 }
1136 }
1137
1138 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
1139 peek(unblockNetwork_in, ResponseMsg) {
1140 assert(in_msg.Dirty);
1141 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
1142 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1143 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1144 in_msg.Address, in_msg.DataBlk);
1145 }
1146 }
1147
1148 action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
1149 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1150 assert(is_valid(tbe));
1151 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1152 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1153 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1154 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1155 }
1156
1157 action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
1158 assert(is_valid(tbe));
1159 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1160 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1161 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1162 }
1163
1164 action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
1165 assert(is_valid(tbe));
1166 assert(tbe.CacheDirty);
1167 }
1168
1169 action(ano_assertNotOwner, "ano", desc="Assert that the requestor is not the current owner") {
1170 if (probe_filter_enabled || full_bit_dir_enabled) {
1171 peek(requestQueue_in, RequestMsg) {
1172 assert(is_valid(cache_entry));
1173 assert(cache_entry.Owner != in_msg.Requestor);
1174 }
1175 }
1176 }
1177
1178 action(ans_assertNotSharer, "ans", desc="Assert that the requestor is not a current sharer") {
1179 if (full_bit_dir_enabled) {
1180 peek(requestQueue_in, RequestMsg) {
1181 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
1182 }
1183 }
1184 }
1185
1186 action(rs_removeSharer, "s", desc="remove current sharer") {
1187 if (full_bit_dir_enabled) {
1188 peek(unblockNetwork_in, ResponseMsg) {
1189 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
1190 cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
1191 }
1192 }
1193 }
1194
1195 action(cs_clearSharers, "cs", desc="clear current sharers") {
1196 if (full_bit_dir_enabled) {
1197 peek(requestQueue_in, RequestMsg) {
1198 cache_entry.Sharers.clear();
1199 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
1200 }
1201 }
1202 }
1203
1204 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
1205 peek(unblockNetwork_in, ResponseMsg) {
1206 enqueue(memQueue_out, MemoryMsg, latency="1") {
1207 out_msg.Address := address;
1208 out_msg.Type := MemoryRequestType:MEMORY_WB;
1209 DPRINTF(RubySlicc, "%s\n", out_msg);
1210 }
1211 }
1212 }
1213
1214 action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
1215 enqueue(memQueue_out, MemoryMsg, latency="1") {
1216 assert(is_valid(tbe));
1217 out_msg.Address := address;
1218 out_msg.Type := MemoryRequestType:MEMORY_WB;
1219 // first, initialize the data blk to the current version of system memory
1220 out_msg.DataBlk := tbe.DataBlk;
1221 // then add the dma write data
1222 out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1223 DPRINTF(RubySlicc, "%s\n", out_msg);
1224 }
1225 }
1226
1227 action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
1228 peek(unblockNetwork_in, ResponseMsg) {
1229 assert(in_msg.Dirty == false);
1230 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
1231
1232 // NOTE: The following check would not be valid in a real
1233 // implementation. We include the data in the "dataless"
1234 // message so we can assert the clean data matches the datablock
1235 // in memory
1236 assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
1237 }
1238 }
1239
1240 action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") {
1241 peek(requestQueue_in, RequestMsg) {
1242 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1243 }
1244 stall_and_wait(requestQueue_in, address);
1245 }
1246
1247 // TRANSITIONS
1248
1249 // Transitions out of E state
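// In E no probe filter entry exists. A cache request allocates one and
// fetches the data from memory. With a filter enabled the directory knows no
// other cache holds the block, so nothing is forwarded and the requestor is
// given credit for all acks; without one it must broadcast to be safe.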
1250 transition(E, GETX, NO_B_W) {
1251 pfa_probeFilterAllocate;
1252 v_allocateTBE;
1253 rx_recordExclusiveInTBE;
1254 saa_setAcksToAllIfPF;
1255 qf_queueMemoryFetchRequest;
1256 fn_forwardRequestIfNecessary;
1257 i_popIncomingRequestQueue;
1258 }
1259
1260 transition(E, GETF, NO_F_W) {
1261 pfa_probeFilterAllocate;
1262 v_allocateTBE;
1263 rx_recordExclusiveInTBE;
1264 saa_setAcksToAllIfPF;
1265 qf_queueMemoryFetchRequest;
1266 fn_forwardRequestIfNecessary;
1267 i_popIncomingRequestQueue;
1268 }
1269
1270 transition(E, GETS, NO_B_W) {
1271 pfa_probeFilterAllocate;
1272 v_allocateTBE;
1273 rx_recordExclusiveInTBE;
1274 saa_setAcksToAllIfPF;
1275 qf_queueMemoryFetchRequest;
1276 fn_forwardRequestIfNecessary;
1277 i_popIncomingRequestQueue;
1278 }
1279
1280 transition(E, DMA_READ, NO_DR_B_W) {
1281 vd_allocateDmaRequestInTBE;
1282 qd_queueMemoryRequestFromDmaRead;
1283 spa_setPendingAcksToZeroIfPF;
1284 f_forwardReadFromDma;
1285 p_popDmaRequestQueue;
1286 }
1287
1288 transition(E, DMA_WRITE, NO_DW_B_W) {
1289 vd_allocateDmaRequestInTBE;
1290 spa_setPendingAcksToZeroIfPF;
1291 sc_signalCompletionIfPF;
1292 f_forwardWriteFromDma;
1293 p_popDmaRequestQueue;
1294 }
1295
1296 // Transitions out of O state
1297 transition(O, GETX, NO_B_W) {
1298 r_setMRU;
1299 v_allocateTBE;
1300 r_recordDataInTBE;
1301 sa_setAcksToOne;
1302 qf_queueMemoryFetchRequest;
1303 fb_forwardRequestBcast;
1304 cs_clearSharers;
1305 i_popIncomingRequestQueue;
1306 }
1307
1308 transition(O, GETF, NO_F_W) {
1309 r_setMRU;
1310 v_allocateTBE;
1311 r_recordDataInTBE;
1312 sa_setAcksToOne;
1313 qf_queueMemoryFetchRequest;
1314 fb_forwardRequestBcast;
1315 cs_clearSharers;
1316 i_popIncomingRequestQueue;
1317 }
1318
1319 // This transition is dumb: if a shared copy exists on-chip, that copy should
1320 // provide the data, not slow off-chip DRAM. The problem is that the current
1321 // caches don't provide data in the S state.
1322 transition(O, GETS, O_B_W) {
1323 r_setMRU;
1324 v_allocateTBE;
1325 r_recordDataInTBE;
1326 saa_setAcksToAllIfPF;
1327 qf_queueMemoryFetchRequest;
1328 fn_forwardRequestIfNecessary;
1329 i_popIncomingRequestQueue;
1330 }
1331
1332 transition(O, DMA_READ, O_DR_B_W) {
1333 vd_allocateDmaRequestInTBE;
1334 spa_setPendingAcksToZeroIfPF;
1335 qd_queueMemoryRequestFromDmaRead;
1336 f_forwardReadFromDma;
1337 p_popDmaRequestQueue;
1338 }
1339
1340 transition(O, Pf_Replacement, O_R) {
1341 v_allocateTBE;
1342 pa_setPendingMsgsToAll;
1343 ia_invalidateAllRequest;
1344 pfd_probeFilterDeallocate;
1345 }
1346
1347 transition(S, Pf_Replacement, S_R) {
1348 v_allocateTBE;
1349 pa_setPendingMsgsToAll;
1350 ia_invalidateAllRequest;
1351 pfd_probeFilterDeallocate;
1352 }
1353
1354 transition(NO, Pf_Replacement, NO_R) {
1355 v_allocateTBE;
1356 po_setPendingMsgsToOne;
1357 io_invalidateOwnerRequest;
1358 pfd_probeFilterDeallocate;
1359 }
1360
1361 transition(NX, Pf_Replacement, NO_R) {
1362 v_allocateTBE;
1363 pa_setPendingMsgsToAll;
1364 ia_invalidateAllRequest;
1365 pfd_probeFilterDeallocate;
1366 }
1367
1368 transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
1369 vd_allocateDmaRequestInTBE;
1370 f_forwardWriteFromDma;
1371 p_popDmaRequestQueue;
1372 }
1373
1374 // Transitions out of NX state
1375 transition(NX, GETX, NO_B) {
1376 r_setMRU;
1377 fb_forwardRequestBcast;
1378 cs_clearSharers;
1379 i_popIncomingRequestQueue;
1380 }
1381
1382 transition(NX, GETF, NO_F) {
1383 r_setMRU;
1384 fb_forwardRequestBcast;
1385 cs_clearSharers;
1386 i_popIncomingRequestQueue;
1387 }
1388
1389 // Transitions out of NO state
1390 transition(NO, GETX, NO_B) {
1391 r_setMRU;
1392 ano_assertNotOwner;
1393 fc_forwardRequestConditionalOwner;
1394 cs_clearSharers;
1395 i_popIncomingRequestQueue;
1396 }
1397
1398 transition(NO, GETF, NO_F) {
1399 r_setMRU;
1400 //ano_assertNotOwner;
1401 nofc_forwardRequestConditionalOwner; //forward request if the requester is not the owner
1402 cs_clearSharers;
1403 oc_sendBlockAck; // send ack if the owner
1404 i_popIncomingRequestQueue;
1405 }
1406
1407 transition(S, GETX, NO_B) {
1408 r_setMRU;
1409 fb_forwardRequestBcast;
1410 cs_clearSharers;
1411 i_popIncomingRequestQueue;
1412 }
1413
1414 transition(S, GETF, NO_F) {
1415 r_setMRU;
1416 fb_forwardRequestBcast;
1417 cs_clearSharers;
1418 i_popIncomingRequestQueue;
1419 }
1420
1421 transition(S, GETS, NO_B) {
1422 r_setMRU;
1423 ano_assertNotOwner;
1424 fb_forwardRequestBcast;
1425 i_popIncomingRequestQueue;
1426 }
1427
1428 transition(NO, GETS, NO_B) {
1429 r_setMRU;
1430 ano_assertNotOwner;
1431 ans_assertNotSharer;
1432 fc_forwardRequestConditionalOwner;
1433 i_popIncomingRequestQueue;
1434 }
1435
1436 transition(NX, GETS, NO_B) {
1437 r_setMRU;
1438 ano_assertNotOwner;
1439 fc_forwardRequestConditionalOwner;
1440 i_popIncomingRequestQueue;
1441 }
1442
1443 transition({NO, NX, S}, PUT, WB) {
1444 //
1445 // note that the PUT requestor may not be the current owner if an invalidate
1446 // raced with PUT
1447 //
1448 a_sendWriteBackAck;
1449 i_popIncomingRequestQueue;
1450 }
1451
1452 transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
1453 vd_allocateDmaRequestInTBE;
1454 f_forwardReadFromDma;
1455 p_popDmaRequestQueue;
1456 }
1457
1458 // Nack PUT requests when races cause us to believe we own the data
1459 transition({O, E}, PUT) {
1460 b_sendWriteBackNack;
1461 i_popIncomingRequestQueue;
1462 }
1463
1464 // Blocked transient states
1465 transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1466 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1467 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1468 {GETS, GETX, GETF, PUT, Pf_Replacement}) {
1469 z_stallAndWaitRequest;
1470 }
1471
1472 transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}){
1473 z_stallAndWaitRequest;
1474 }
1475
1476 transition(NO_B, {GETX, GETF}, NO_B_X) {
1477 z_stallAndWaitRequest;
1478 }
1479
1480 transition(NO_B, {PUT, Pf_Replacement}) {
1481 z_stallAndWaitRequest;
1482 }
1483
1484 transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
1485 z_stallAndWaitRequest;
1486 }
1487
1488 transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1489 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1490 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1491 {DMA_READ, DMA_WRITE}) {
1492 zd_stallAndWaitDMARequest;
1493 }
1494
1495 // merge GETS into one response
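// While blocked on an earlier GETS, later GETS requestors are recorded in
// tbe.GetSRequestors. When the first requestor unblocks, a single MERGED_GETS
// is forwarded to the new owner and the directory waits for one UNBLOCKS per
// merged requestor before leaving NO_B_S_W.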
1496 transition(NO_B, GETS, NO_B_S) {
1497 v_allocateTBE;
1498 rs_recordGetSRequestor;
1499 i_popIncomingRequestQueue;
1500 }
1501
1502 transition(NO_B_S, GETS) {
1503 rs_recordGetSRequestor;
1504 i_popIncomingRequestQueue;
1505 }
1506
1507 // unblock responses
1508 transition({NO_B, NO_B_X}, UnblockS, NX) {
1509 us_updateSharerIfFBD;
1510 k_wakeUpDependents;
1511 j_popIncomingUnblockQueue;
1512 }
1513
1514 transition({NO_B, NO_B_X}, UnblockM, NO) {
1515 uo_updateOwnerIfPf;
1516 us_updateSharerIfFBD;
1517 k_wakeUpDependents;
1518 j_popIncomingUnblockQueue;
1519 }
1520
1521 transition(NO_B_S, UnblockS, NO_B_S_W) {
1522 us_updateSharerIfFBD;
1523 fr_forwardMergeReadRequestsToOwner;
1524 sp_setPendingMsgsToMergedSharers;
1525 j_popIncomingUnblockQueue;
1526 }
1527
1528 transition(NO_B_S, UnblockM, NO_B_S_W) {
1529 uo_updateOwnerIfPf;
1530 fr_forwardMergeReadRequestsToOwner;
1531 sp_setPendingMsgsToMergedSharers;
1532 j_popIncomingUnblockQueue;
1533 }
1534
1535 transition(NO_B_S_W, UnblockS) {
1536 us_updateSharerIfFBD;
1537 mu_decrementNumberOfUnblocks;
1538 os_checkForMergedGetSCompletion;
1539 j_popIncomingUnblockQueue;
1540 }
1541
1542 transition(NO_B_S_W, All_Unblocks, NX) {
1543 w_deallocateTBE;
1544 k_wakeUpDependents;
1545 g_popTriggerQueue;
1546 }
1547
1548 transition(O_B, UnblockS, O) {
1549 us_updateSharerIfFBD;
1550 k_wakeUpDependents;
1551 j_popIncomingUnblockQueue;
1552 }
1553
1554 transition(O_B, UnblockM, NO) {
1555 us_updateSharerIfFBD;
1556 uo_updateOwnerIfPf;
1557 k_wakeUpDependents;
1558 j_popIncomingUnblockQueue;
1559 }
1560
1561 transition(NO_B_W, Memory_Data, NO_B) {
1562 d_sendData;
1563 w_deallocateTBE;
1564 l_popMemQueue;
1565 }
1566
1567 transition(NO_F_W, Memory_Data, NO_F) {
1568 d_sendData;
1569 w_deallocateTBE;
1570 l_popMemQueue;
1571 }
1572
1573 transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
1574 r_recordMemoryData;
1575 o_checkForCompletion;
1576 l_popMemQueue;
1577 }
1578
1579 transition(O_DR_B_W, Memory_Data, O_DR_B) {
1580 r_recordMemoryData;
1581 dr_sendDmaData;
1582 o_checkForCompletion;
1583 l_popMemQueue;
1584 }
1585
1586 transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
1587 m_decrementNumberOfMessages;
1588 o_checkForCompletion;
1589 n_popResponseQueue;
1590 }
1591
1592 transition({O_R, S_R, NO_R}, Ack) {
1593 m_decrementNumberOfMessages;
1594 o_checkForCompletion;
1595 n_popResponseQueue;
1596 }
1597
1598 transition(S_R, Data) {
1599 wr_writeResponseDataToMemory;
1600 m_decrementNumberOfMessages;
1601 o_checkForCompletion;
1602 n_popResponseQueue;
1603 }
1604
1605 transition(NO_R, {Data, Exclusive_Data}) {
1606 wr_writeResponseDataToMemory;
1607 m_decrementNumberOfMessages;
1608 o_checkForCompletion;
1609 n_popResponseQueue;
1610 }
1611
1612 transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
1613 w_deallocateTBE;
1614 k_wakeUpDependents;
1615 g_popTriggerQueue;
1616 }
1617
1618 transition({NO_DR_B_W, O_DR_B_W}, Ack) {
1619 m_decrementNumberOfMessages;
1620 n_popResponseQueue;
1621 }
1622
1623 transition(NO_DR_B_W, Shared_Ack) {
1624 m_decrementNumberOfMessages;
1625 r_setSharerBit;
1626 n_popResponseQueue;
1627 }
1628
1629 transition(O_DR_B, Shared_Ack) {
1630 m_decrementNumberOfMessages;
1631 r_setSharerBit;
1632 o_checkForCompletion;
1633 n_popResponseQueue;
1634 }
1635
1636 transition(O_DR_B_W, Shared_Ack) {
1637 m_decrementNumberOfMessages;
1638 r_setSharerBit;
1639 n_popResponseQueue;
1640 }
1641
1642 transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
1643 m_decrementNumberOfMessages;
1644 r_setSharerBit;
1645 o_checkForCompletion;
1646 n_popResponseQueue;
1647 }
1648
1649 transition(NO_DR_B_W, Shared_Data) {
1650 r_recordCacheData;
1651 m_decrementNumberOfMessages;
1652 so_setOwnerBit;
1653 o_checkForCompletion;
1654 n_popResponseQueue;
1655 }
1656
1657 transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
1658 r_recordCacheData;
1659 m_decrementNumberOfMessages;
1660 so_setOwnerBit;
1661 o_checkForCompletion;
1662 n_popResponseQueue;
1663 }
1664
1665 transition(NO_DR_B_W, {Exclusive_Data, Data}) {
1666 r_recordCacheData;
1667 m_decrementNumberOfMessages;
1668 n_popResponseQueue;
1669 }
1670
1671 transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
1672 r_recordCacheData;
1673 m_decrementNumberOfMessages;
1674 o_checkForCompletion;
1675 n_popResponseQueue;
1676 }
1677
1678 transition(NO_DR_B, All_acks_and_owner_data, O) {
1679 //
1680 // Note that the DMA consistency model allows us to send the DMA device
1681 // a response as soon as we receive valid data and prior to receiving
1682 // all acks. However, to simplify the protocol we wait for all acks.
1683 //
1684 dt_sendDmaDataFromTbe;
1685 wdt_writeDataFromTBE;
1686 w_deallocateTBE;
1687 k_wakeUpDependents;
1688 g_popTriggerQueue;
1689 }
1690
1691 transition(NO_DR_B, All_acks_and_shared_data, S) {
1692 //
1693 // Note that the DMA consistency model allows us to send the DMA device
1694 // a response as soon as we receive valid data and prior to receiving
1695 // all acks. However, to simplify the protocol we wait for all acks.
1696 //
1697 dt_sendDmaDataFromTbe;
1698 wdt_writeDataFromTBE;
1699 w_deallocateTBE;
1700 k_wakeUpDependents;
1701 g_popTriggerQueue;
1702 }
1703
1704 transition(NO_DR_B_D, All_acks_and_owner_data, O) {
1705 //
1706 // Note that the DMA consistency model allows us to send the DMA device
1707 // a response as soon as we receive valid data and prior to receiving
1708 // all acks. However, to simplify the protocol we wait for all acks.
1709 //
1710 dt_sendDmaDataFromTbe;
1711 wdt_writeDataFromTBE;
1712 w_deallocateTBE;
1713 k_wakeUpDependents;
1714 g_popTriggerQueue;
1715 }
1716
1717 transition(NO_DR_B_D, All_acks_and_shared_data, S) {
1718 //
1719 // Note that the DMA consistency model allows us to send the DMA device
1720 // a response as soon as we receive valid data and prior to receiving
1721 // all acks. However, to simplify the protocol we wait for all acks.
1722 //
1723 dt_sendDmaDataFromTbe;
1724 wdt_writeDataFromTBE;
1725 w_deallocateTBE;
1726 k_wakeUpDependents;
1727 g_popTriggerQueue;
1728 }
1729
1730 transition(O_DR_B, All_acks_and_owner_data, O) {
1731 wdt_writeDataFromTBE;
1732 w_deallocateTBE;
1733 k_wakeUpDependents;
1734 g_popTriggerQueue;
1735 }
1736
1737 transition(O_DR_B, All_acks_and_data_no_sharers, E) {
1738 wdt_writeDataFromTBE;
1739 w_deallocateTBE;
1740 pfd_probeFilterDeallocate;
1741 k_wakeUpDependents;
1742 g_popTriggerQueue;
1743 }
1744
1745 transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
1746 //
1747 // Note that the DMA consistency model allows us to send the DMA device
1748 // a response as soon as we receive valid data and prior to receiving
1749 // all acks. However, to simplify the protocol we wait for all acks.
1750 //
1751 dt_sendDmaDataFromTbe;
1752 wdt_writeDataFromTBE;
1753 w_deallocateTBE;
1754 ppfd_possibleProbeFilterDeallocate;
1755 k_wakeUpDependents;
1756 g_popTriggerQueue;
1757 }
1758
1759 transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
1760 a_assertCacheData;
1761 //
1762 // Note that the DMA consistency model allows us to send the DMA device
1763 // a response as soon as we receive valid data and prior to receiving
1764 // all acks. However, to simplify the protocol we wait for all acks.
1765 //
1766 dt_sendDmaDataFromTbe;
1767 wdt_writeDataFromTBE;
1768 w_deallocateTBE;
1769 ppfd_possibleProbeFilterDeallocate;
1770 k_wakeUpDependents;
1771 g_popTriggerQueue;
1772 }
1773
1774 transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
1775 dwt_writeDmaDataFromTBE;
1776 ld_queueMemoryDmaWrite;
1777 g_popTriggerQueue;
1778 }
1779
1780 transition(NO_DW_W, Memory_Ack, E) {
1781 da_sendDmaAck;
1782 w_deallocateTBE;
1783 ppfd_possibleProbeFilterDeallocate;
1784 k_wakeUpDependents;
1785 l_popMemQueue;
1786 }
1787
1788 transition(O_B_W, Memory_Data, O_B) {
1789 d_sendData;
1790 w_deallocateTBE;
1791 l_popMemQueue;
1792 }
1793
1794 transition(NO_B_W, UnblockM, NO_W) {
1795 uo_updateOwnerIfPf;
1796 j_popIncomingUnblockQueue;
1797 }
1798
1799 transition(NO_B_W, UnblockS, NO_W) {
1800 us_updateSharerIfFBD;
1801 j_popIncomingUnblockQueue;
1802 }
1803
1804 transition(O_B_W, UnblockS, O_W) {
1805 us_updateSharerIfFBD;
1806 j_popIncomingUnblockQueue;
1807 }
1808
1809 transition(NO_W, Memory_Data, NO) {
1810 w_deallocateTBE;
1811 k_wakeUpDependents;
1812 l_popMemQueue;
1813 }
1814
1815 transition(O_W, Memory_Data, O) {
1816 w_deallocateTBE;
1817 k_wakeUpDependents;
1818 l_popMemQueue;
1819 }
1820
1821 // WB State Transitions
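// A PUT is acked from {NO, NX, S} (and from NO_F after a PUTF), moving the
// directory to WB. The writeback then completes on the unblock network:
// dirty data is written to memory (waiting in WB_O_W / WB_E_W for
// Memory_Ack), clean writebacks complete immediately, and the Exclusive
// variants additionally deallocate the probe filter entry since no sharers
// remain.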
1822 transition(WB, Writeback_Dirty, WB_O_W) {
1823 l_writeDataToMemory;
1824 rs_removeSharer;
1825 l_queueMemoryWBRequest;
1826 j_popIncomingUnblockQueue;
1827 }
1828
1829 transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
1830 l_writeDataToMemory;
1831 rs_removeSharer;
1832 l_queueMemoryWBRequest;
1833 j_popIncomingUnblockQueue;
1834 }
1835
1836 transition(WB_E_W, Memory_Ack, E) {
1837 pfd_probeFilterDeallocate;
1838 k_wakeUpDependents;
1839 l_popMemQueue;
1840 }
1841
1842 transition(WB_O_W, Memory_Ack, O) {
1843 k_wakeUpDependents;
1844 l_popMemQueue;
1845 }
1846
1847 transition(WB, Writeback_Clean, O) {
1848 ll_checkIncomingWriteback;
1849 rs_removeSharer;
1850 k_wakeUpDependents;
1851 j_popIncomingUnblockQueue;
1852 }
1853
1854 transition(WB, Writeback_Exclusive_Clean, E) {
1855 ll_checkIncomingWriteback;
1856 rs_removeSharer;
1857 pfd_probeFilterDeallocate;
1858 k_wakeUpDependents;
1859 j_popIncomingUnblockQueue;
1860 }
1861
1862 transition(WB, Unblock, NX) {
1863 auno_assertUnblockerNotOwner;
1864 k_wakeUpDependents;
1865 j_popIncomingUnblockQueue;
1866 }
1867
1868 transition(NO_F, PUTF, WB) {
1869 a_sendWriteBackAck;
1870 i_popIncomingRequestQueue;
1871 }
1872
1873 // possible race between GETF and UnblockM -- not sure if this is still needed?
1874 transition(NO_F, UnblockM) {
1875 us_updateSharerIfFBD;
1876 uo_updateOwnerIfPf;
1877 j_popIncomingUnblockQueue;
1878 }
1879 }