ruby: message buffers: significant changes
[gem5.git] / src / mem / protocol / MOESI_hammer-dir.sm
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory;
38 CacheMemory * probeFilter;
39 MemoryControl * memBuffer;
40 Cycles memory_controller_latency := 2;
41 bool probe_filter_enabled := "False";
42 bool full_bit_dir_enabled := "False";
43
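// The same controller supports three sharing-tracker configurations: plain
// broadcast (both flags false), a probe filter (probe_filter_enabled), and a
// full-bit sharing vector (full_bit_dir_enabled).  Probe filter manipulation
// below is guarded by "probe_filter_enabled || full_bit_dir_enabled".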
44 MessageBuffer * forwardFromDir, network="To", virtual_network="3",
45 ordered="false", vnet_type="forward";
46
47 MessageBuffer * responseFromDir, network="To", virtual_network="4",
48 ordered="false", vnet_type="response";
49
50   // For a finite buffered network, note that the DMA response network works
51   // only on this relatively low-numbered (lower priority) virtual network
52   // because the trigger queue decouples cache responses from DMA responses.
53 MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
54 ordered="true", vnet_type="response";
55
56 MessageBuffer * unblockToDir, network="From", virtual_network="5",
57 ordered="false", vnet_type="unblock";
58
59 MessageBuffer * responseToDir, network="From", virtual_network="4",
60 ordered="false", vnet_type="response";
61
62 MessageBuffer * requestToDir, network="From", virtual_network="2",
63 ordered="false", vnet_type="request", recycle_latency="1";
64
65 MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
66 ordered="true", vnet_type="request";
67 {
68 // STATES
69 state_declaration(State, desc="Directory states", default="Directory_State_E") {
70 // Base states
71 NX, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in O at Owner";
72 NO, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
73 S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
74 O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
75 E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
76
77 O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
78 S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
79 NO_R, AccessPermission:Busy, desc="Was Not Owner or Sharer, replacing probe filter entry";
80
81 NO_B, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked";
82 NO_B_X, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
83 NO_B_S, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
84 NO_B_S_W, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
85 O_B, AccessPermission:Busy, "O^B", desc="Owner, Blocked";
86 NO_B_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
87 O_B_W, AccessPermission:Busy, desc="Owner, Blocked, waiting for Dram";
88 NO_W, AccessPermission:Busy, desc="Not Owner, waiting for Dram";
89 O_W, AccessPermission:Busy, desc="Owner, waiting for Dram";
90 NO_DW_B_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram and cache responses";
91 NO_DR_B_W, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for Dram and cache responses";
92 NO_DR_B_D, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
93 NO_DR_B, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses";
94 NO_DW_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram";
95 O_DR_B_W, AccessPermission:Busy, desc="Owner, Dma Read waiting for Dram and cache responses";
96 O_DR_B, AccessPermission:Busy, desc="Owner, Dma Read waiting for cache responses";
97 WB, AccessPermission:Busy, desc="Blocked on a writeback";
98 WB_O_W, AccessPermission:Busy, desc="Blocked on memory write, will go to O";
99 WB_E_W, AccessPermission:Busy, desc="Blocked on memory write, will go to E";
100
101 NO_F, AccessPermission:Busy, desc="Blocked on a flush";
102 NO_F_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
103 }
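// Naming convention for the states above: NX, NO, S, O, and E are the stable
// states (all but E have a probe filter entry), while the _B, _W, _R, and _D
// suffixes mark transient states that are blocked, waiting for DRAM, replacing
// a probe filter entry, or waiting for dirty data, respectively.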
104
105 // Events
106 enumeration(Event, desc="Directory events") {
107 GETX, desc="A GETX arrives";
108 GETS, desc="A GETS arrives";
109 PUT, desc="A PUT arrives";
110 Unblock, desc="An unblock message arrives";
111   UnblockS,              desc="An unblock message arrives, unblocker is a sharer";
112   UnblockM,              desc="An unblock message arrives, unblocker is the new owner";
113 Writeback_Clean, desc="The final part of a PutX (no data)";
114 Writeback_Dirty, desc="The final part of a PutX (data)";
115 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
116 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
117
118 // Probe filter
119 Pf_Replacement, desc="probe filter replacement";
120
121 // DMA requests
122 DMA_READ, desc="A DMA Read memory request";
123 DMA_WRITE, desc="A DMA Write memory request";
124
125 // Memory Controller
126 Memory_Data, desc="Fetched data from memory arrives";
127 Memory_Ack, desc="Writeback Ack from memory arrives";
128
129 // Cache responses required to handle DMA
130 Ack, desc="Received an ack message";
131 Shared_Ack, desc="Received an ack message, responder has a shared copy";
132 Shared_Data, desc="Received a data message, responder has a shared copy";
133   Data,             desc="Received a data message, responder had an owner or exclusive copy and gave it to us";
134   Exclusive_Data,   desc="Received a data message, responder had an exclusive copy and gave it to us";
135
136 // Triggers
137 All_acks_and_shared_data, desc="Received shared data and message acks";
138   All_acks_and_owner_data,     desc="Received owner data and message acks";
139 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
140 All_Unblocks, desc="Received all unblocks for a merged gets request";
141 GETF, desc="A GETF arrives";
142 PUTF, desc="A PUTF arrives";
143 }
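// The All_acks_* and All_Unblocks events above are not network messages; they
// are posted locally on the trigger queue (see o_checkForCompletion and
// os_checkForMergedGetSCompletion) once all expected responses have arrived.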
144
145 // TYPES
146
147 // DirectoryEntry
148 structure(Entry, desc="...", interface="AbstractEntry") {
149 State DirectoryState, desc="Directory state";
150 DataBlock DataBlk, desc="data for the block";
151 }
152
153 // ProbeFilterEntry
154 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
155 State PfState, desc="Directory state";
156 MachineID Owner, desc="Owner node";
157 DataBlock DataBlk, desc="data for the block";
158 Set Sharers, desc="sharing vector for full bit directory";
159 }
160
161 // TBE entries for DMA requests
162 structure(TBE, desc="TBE entries for outstanding DMA requests") {
163 Address PhysicalAddress, desc="physical address";
164 State TBEState, desc="Transient State";
165 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
166 int Acks, default="0", desc="The number of acks that the waiting response represents";
167 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
168   DataBlock DmaDataBlk,    desc="DMA Data to be written. Partial blocks need to be merged with system memory";
169 DataBlock DataBlk, desc="The current view of system memory";
170 int Len, desc="...";
171 MachineID DmaRequestor, desc="DMA requestor";
172 NetDest GetSRequestors, desc="GETS merged requestors";
173 int NumPendingMsgs, desc="Number of pending acks/messages";
174 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
175 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
176   bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
177 }
178
179 structure(TBETable, external="yes") {
180 TBE lookup(Address);
181 void allocate(Address);
182 void deallocate(Address);
183 bool isPresent(Address);
184 }
185
186 void set_cache_entry(AbstractCacheEntry b);
187 void unset_cache_entry();
188 void set_tbe(TBE a);
189 void unset_tbe();
190 void wakeUpBuffers(Address a);
191 Cycles curCycle();
192
193 // ** OBJECTS **
194
195 Set fwd_set;
196
197 TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
198
199 Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
200 Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
201
202 if (is_valid(dir_entry)) {
203 return dir_entry;
204 }
205
206 dir_entry := static_cast(Entry, "pointer",
207 directory.allocate(addr, new Entry));
208 return dir_entry;
209 }
210
211 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
212 Entry dir_entry := getDirectoryEntry(addr);
213 if(is_valid(dir_entry)) {
214 return dir_entry.DataBlk;
215 }
216
217 TBE tbe := TBEs[addr];
218 if(is_valid(tbe)) {
219 return tbe.DataBlk;
220 }
221
222 error("Data block missing!");
223 }
224
225 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
226 if (probe_filter_enabled || full_bit_dir_enabled) {
227 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
228 return pfEntry;
229 }
230 return OOD;
231 }
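// When neither the probe filter nor the full-bit directory is enabled, the
// lookup above returns OOD, so is_valid(pf_entry) is false and the protocol
// falls back to broadcast behavior.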
232
233 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
234 if (is_valid(tbe)) {
235 return tbe.TBEState;
236 } else {
237 if (probe_filter_enabled || full_bit_dir_enabled) {
238 if (is_valid(pf_entry)) {
239 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
240 }
241 }
242 return getDirectoryEntry(addr).DirectoryState;
243 }
244 }
245
246 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
247 if (is_valid(tbe)) {
248 tbe.TBEState := state;
249 }
250 if (probe_filter_enabled || full_bit_dir_enabled) {
251 if (is_valid(pf_entry)) {
252 pf_entry.PfState := state;
253 }
254 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
255 assert(is_valid(pf_entry));
256 }
257 if (state == State:E) {
258 assert(is_valid(pf_entry) == false);
259 }
260 }
261 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
262 state == State:O) {
263 assert(is_valid(tbe) == false);
264 }
265 getDirectoryEntry(addr).DirectoryState := state;
266 }
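// The assertions above capture the directory invariants: when filtering is
// enabled, the stable states NX, NO, S, and O require a probe filter entry
// and E requires that none exists; no stable state may hold a live TBE.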
267
268 AccessPermission getAccessPermission(Address addr) {
269 TBE tbe := TBEs[addr];
270 if(is_valid(tbe)) {
271 return Directory_State_to_permission(tbe.TBEState);
272 }
273
274 if(directory.isPresent(addr)) {
275 return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
276 }
277
278 return AccessPermission:NotPresent;
279 }
280
281 void setAccessPermission(PfEntry pf_entry, Address addr, State state) {
282 getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
283 }
284
285 Event cache_request_to_event(CoherenceRequestType type) {
286 if (type == CoherenceRequestType:GETS) {
287 return Event:GETS;
288 } else if (type == CoherenceRequestType:GETX) {
289 return Event:GETX;
290 } else if (type == CoherenceRequestType:GETF) {
291 return Event:GETF;
292 } else {
293 error("Invalid CoherenceRequestType");
294 }
295 }
296
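// Internal ordered queue the directory uses to post the trigger events
// declared above to itself; it is not connected to the network.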
297 MessageBuffer triggerQueue, ordered="true";
298
299 // ** OUT_PORTS **
300 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
301 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
302 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
303 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
304 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
305
306 //
307 // Memory buffer for memory controller to DIMM communication
308 //
309 out_port(memQueue_out, MemoryMsg, memBuffer);
310
311 // ** IN_PORTS **
312
313 // Trigger Queue
314 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
315 if (triggerQueue_in.isReady()) {
316 peek(triggerQueue_in, TriggerMsg) {
317 PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
318 TBE tbe := TBEs[in_msg.Addr];
319 if (in_msg.Type == TriggerType:ALL_ACKS) {
320 trigger(Event:All_acks_and_owner_data, in_msg.Addr,
321 pf_entry, tbe);
322 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
323 trigger(Event:All_acks_and_shared_data, in_msg.Addr,
324 pf_entry, tbe);
325 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
326 trigger(Event:All_acks_and_data_no_sharers, in_msg.Addr,
327 pf_entry, tbe);
328 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
329 trigger(Event:All_Unblocks, in_msg.Addr,
330 pf_entry, tbe);
331 } else {
332 error("Unexpected message");
333 }
334 }
335 }
336 }
337
338 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
339 if (unblockNetwork_in.isReady()) {
340 peek(unblockNetwork_in, ResponseMsg) {
341 PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
342 TBE tbe := TBEs[in_msg.Addr];
343 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
344 trigger(Event:Unblock, in_msg.Addr, pf_entry, tbe);
345 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
346 trigger(Event:UnblockS, in_msg.Addr, pf_entry, tbe);
347 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
348 trigger(Event:UnblockM, in_msg.Addr, pf_entry, tbe);
349 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
350 trigger(Event:Writeback_Clean, in_msg.Addr, pf_entry, tbe);
351 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
352 trigger(Event:Writeback_Dirty, in_msg.Addr, pf_entry, tbe);
353 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
354 trigger(Event:Writeback_Exclusive_Clean, in_msg.Addr,
355 pf_entry, tbe);
356 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
357 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Addr,
358 pf_entry, tbe);
359 } else {
360 error("Invalid message");
361 }
362 }
363 }
364 }
365
366 // Response Network
367 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
368 if (responseToDir_in.isReady()) {
369 peek(responseToDir_in, ResponseMsg) {
370 PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
371 TBE tbe := TBEs[in_msg.Addr];
372 if (in_msg.Type == CoherenceResponseType:ACK) {
373 trigger(Event:Ack, in_msg.Addr, pf_entry, tbe);
374 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
375 trigger(Event:Shared_Ack, in_msg.Addr, pf_entry, tbe);
376 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
377 trigger(Event:Shared_Data, in_msg.Addr, pf_entry, tbe);
378 } else if (in_msg.Type == CoherenceResponseType:DATA) {
379 trigger(Event:Data, in_msg.Addr, pf_entry, tbe);
380 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
381 trigger(Event:Exclusive_Data, in_msg.Addr, pf_entry, tbe);
382 } else {
383 error("Unexpected message");
384 }
385 }
386 }
387 }
388
389 // off-chip memory request/response is done
390 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
391 if (memQueue_in.isReady()) {
392 peek(memQueue_in, MemoryMsg) {
393 PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
394 TBE tbe := TBEs[in_msg.Addr];
395 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
396 trigger(Event:Memory_Data, in_msg.Addr, pf_entry, tbe);
397 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
398 trigger(Event:Memory_Ack, in_msg.Addr, pf_entry, tbe);
399 } else {
400 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
401 error("Invalid message");
402 }
403 }
404 }
405 }
406
407 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
408 if (requestQueue_in.isReady()) {
409 peek(requestQueue_in, RequestMsg) {
410 PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
411 TBE tbe := TBEs[in_msg.Addr];
412 if (in_msg.Type == CoherenceRequestType:PUT) {
413 trigger(Event:PUT, in_msg.Addr, pf_entry, tbe);
414 } else if (in_msg.Type == CoherenceRequestType:PUTF) {
415 trigger(Event:PUTF, in_msg.Addr, pf_entry, tbe);
416 } else {
417 if (probe_filter_enabled || full_bit_dir_enabled) {
418 if (is_valid(pf_entry)) {
419 trigger(cache_request_to_event(in_msg.Type), in_msg.Addr,
420 pf_entry, tbe);
421 } else {
422 if (probeFilter.cacheAvail(in_msg.Addr)) {
423 trigger(cache_request_to_event(in_msg.Type), in_msg.Addr,
424 pf_entry, tbe);
425 } else {
426 trigger(Event:Pf_Replacement,
427 probeFilter.cacheProbe(in_msg.Addr),
428 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Addr)),
429 TBEs[probeFilter.cacheProbe(in_msg.Addr)]);
430 }
431 }
432 } else {
433 trigger(cache_request_to_event(in_msg.Type), in_msg.Addr,
434 pf_entry, tbe);
435 }
436 }
437 }
438 }
439 }
440
441 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
442 if (dmaRequestQueue_in.isReady()) {
443 peek(dmaRequestQueue_in, DMARequestMsg) {
444 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
445 TBE tbe := TBEs[in_msg.LineAddress];
446 if (in_msg.Type == DMARequestType:READ) {
447 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
448 } else if (in_msg.Type == DMARequestType:WRITE) {
449 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
450 } else {
451 error("Invalid message");
452 }
453 }
454 }
455 }
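// The rank values above order message processing: higher-ranked in_ports are
// polled first, so local triggers beat unblocks, which beat cache responses,
// memory responses, CPU requests, and finally DMA requests.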
456
457 // Actions
458
459 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
460 if (probe_filter_enabled || full_bit_dir_enabled) {
461 assert(is_valid(cache_entry));
462 probeFilter.setMRU(address);
463 }
464 }
465
466 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
467 if (probe_filter_enabled || full_bit_dir_enabled) {
468 assert(is_valid(cache_entry));
469 peek(unblockNetwork_in, ResponseMsg) {
470 assert(cache_entry.Owner != in_msg.Sender);
471 if (full_bit_dir_enabled) {
472 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
473 }
474 }
475 }
476 }
477
478 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
479 if (probe_filter_enabled || full_bit_dir_enabled) {
480 assert(is_valid(cache_entry));
481 peek(unblockNetwork_in, ResponseMsg) {
482 cache_entry.Owner := in_msg.Sender;
483 if (full_bit_dir_enabled) {
484 cache_entry.Sharers.clear();
485 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
486 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
487 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
488 }
489 }
490 }
491 }
492
493 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
494 if (full_bit_dir_enabled) {
495 assert(probeFilter.isTagPresent(address));
496 peek(unblockNetwork_in, ResponseMsg) {
497 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
498 }
499 }
500 }
501
502 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
503 peek(requestQueue_in, RequestMsg) {
504 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
505 out_msg.Addr := address;
506 out_msg.Type := CoherenceRequestType:WB_ACK;
507 out_msg.Requestor := in_msg.Requestor;
508 out_msg.Destination.add(in_msg.Requestor);
509 out_msg.MessageSize := MessageSizeType:Writeback_Control;
510 }
511 }
512 }
513
514 action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
515 peek(requestQueue_in, RequestMsg) {
516 if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
517 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
518 out_msg.Addr := address;
519 out_msg.Type := CoherenceRequestType:BLOCK_ACK;
520 out_msg.Requestor := in_msg.Requestor;
521 out_msg.Destination.add(in_msg.Requestor);
522 out_msg.MessageSize := MessageSizeType:Writeback_Control;
523 }
524 }
525 }
526 }
527
528 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
529 peek(requestQueue_in, RequestMsg) {
530 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
531 out_msg.Addr := address;
532 out_msg.Type := CoherenceRequestType:WB_NACK;
533 out_msg.Requestor := in_msg.Requestor;
534 out_msg.Destination.add(in_msg.Requestor);
535 out_msg.MessageSize := MessageSizeType:Writeback_Control;
536 }
537 }
538 }
539
540 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
541 if (probe_filter_enabled || full_bit_dir_enabled) {
542 peek(requestQueue_in, RequestMsg) {
543 set_cache_entry(probeFilter.allocate(address, new PfEntry));
544 cache_entry.Owner := in_msg.Requestor;
545 cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
546 }
547 }
548 }
549
550 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
551 if (probe_filter_enabled || full_bit_dir_enabled) {
552 probeFilter.deallocate(address);
553 unset_cache_entry();
554 }
555 }
556
557   action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if present") {
558 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
559 probeFilter.deallocate(address);
560 unset_cache_entry();
561 }
562 }
563
564 action(v_allocateTBE, "v", desc="Allocate TBE") {
565 check_allocate(TBEs);
566 peek(requestQueue_in, RequestMsg) {
567 TBEs.allocate(address);
568 set_tbe(TBEs[address]);
569 tbe.PhysicalAddress := address;
570 tbe.ResponseType := CoherenceResponseType:NULL;
571 }
572 }
573
574 action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
575 check_allocate(TBEs);
576 peek(dmaRequestQueue_in, DMARequestMsg) {
577 TBEs.allocate(address);
578 set_tbe(TBEs[address]);
579 tbe.DmaDataBlk := in_msg.DataBlk;
580 tbe.PhysicalAddress := in_msg.PhysicalAddress;
581 tbe.Len := in_msg.Len;
582 tbe.DmaRequestor := in_msg.Requestor;
583 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
584 //
585 // One ack for each last-level cache
586 //
587 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
588 //
589 // Assume initially that the caches store a clean copy and that memory
590 // will provide the data
591 //
592 tbe.CacheDirty := false;
593 }
594 }
595
596 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
597 assert(is_valid(tbe));
598 if (full_bit_dir_enabled) {
599 assert(is_valid(cache_entry));
600 tbe.NumPendingMsgs := cache_entry.Sharers.count();
601 } else {
602 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
603 }
604 }
605
606 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
607 assert(is_valid(tbe));
608 tbe.NumPendingMsgs := 1;
609 }
610
611 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
612 TBEs.deallocate(address);
613 unset_tbe();
614 }
615
616 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
617 assert(is_valid(tbe));
618 peek(requestQueue_in, RequestMsg) {
619 if (full_bit_dir_enabled) {
620 assert(is_valid(cache_entry));
621 //
622       // If we are using the full-bit directory and no sharers exist beyond
623       // the requestor, then we must set the ack number to all, not one
624 //
625 fwd_set := cache_entry.Sharers;
626 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
627 if (fwd_set.count() > 0) {
628 tbe.Acks := 1;
629 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
630 tbe.SilentAcks := tbe.SilentAcks - 1;
631 } else {
632 tbe.Acks := machineCount(MachineType:L1Cache);
633 tbe.SilentAcks := 0;
634 }
635 } else {
636 tbe.Acks := 1;
637 }
638 }
639 }
640
641 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
642 assert(is_valid(tbe));
643 if (probe_filter_enabled || full_bit_dir_enabled) {
644 tbe.Acks := machineCount(MachineType:L1Cache);
645 tbe.SilentAcks := 0;
646 } else {
647 tbe.Acks := 1;
648 }
649 }
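// Ack accounting: tbe.Acks is the ack count that d_sendData later returns to
// the requestor along with the memory data, while tbe.SilentAcks counts caches
// that are never probed and therefore never ack (full-bit directory only).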
650
651 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
652 peek(responseToDir_in, ResponseMsg) {
653 assert(is_valid(tbe));
654 assert(in_msg.Acks > 0);
655 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
656 //
657 // Note that cache data responses will have an ack count of 2. However,
658 // directory DMA requests must wait for acks from all LLC caches, so
659 // only decrement by 1.
660 //
661 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
662 (in_msg.Type == CoherenceResponseType:DATA) ||
663 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
664 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
665 } else {
666 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
667 }
668 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
669 }
670 }
671
672   action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of unblocks for which we're waiting") {
673 peek(unblockNetwork_in, ResponseMsg) {
674 assert(is_valid(tbe));
675 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
676 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
677 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
678 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
679 }
680 }
681
682 action(n_popResponseQueue, "n", desc="Pop response queue") {
683 responseToDir_in.dequeue();
684 }
685
686 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
687 assert(is_valid(tbe));
688 if (tbe.NumPendingMsgs == 0) {
689 enqueue(triggerQueue_out, TriggerMsg) {
690 out_msg.Addr := address;
691 if (tbe.Sharers) {
692 if (tbe.Owned) {
693 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
694 } else {
695 out_msg.Type := TriggerType:ALL_ACKS;
696 }
697 } else {
698 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
699 }
700 }
701 }
702 }
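// The trigger type chosen above records whether sharers and/or an owner
// responded; the DMA and replacement transitions below use it to select the
// final directory state (O, S, or E).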
703
704 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
705 assert(is_valid(tbe));
706 if (tbe.NumPendingMsgs == 0) {
707 enqueue(triggerQueue_out, TriggerMsg) {
708 out_msg.Addr := address;
709 out_msg.Type := TriggerType:ALL_UNBLOCKS;
710 }
711 }
712 }
713
714 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
715 assert(is_valid(tbe));
716 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
717 }
718
719 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
720 if (probe_filter_enabled || full_bit_dir_enabled) {
721 assert(is_valid(tbe));
722 tbe.NumPendingMsgs := 0;
723 }
724 }
725
726 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
727 assert(is_valid(tbe));
728 if (tbe.NumPendingMsgs == 0) {
729 assert(probe_filter_enabled || full_bit_dir_enabled);
730 enqueue(triggerQueue_out, TriggerMsg) {
731 out_msg.Addr := address;
732 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
733 }
734 }
735 }
736
737 action(d_sendData, "d", desc="Send data to requestor") {
738 peek(memQueue_in, MemoryMsg) {
739 enqueue(responseNetwork_out, ResponseMsg, 1) {
740 assert(is_valid(tbe));
741 out_msg.Addr := address;
742 out_msg.Type := tbe.ResponseType;
743 out_msg.Sender := machineID;
744 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
745 out_msg.DataBlk := in_msg.DataBlk;
746 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
747 out_msg.Dirty := false; // By definition, the block is now clean
748 out_msg.Acks := tbe.Acks;
749 out_msg.SilentAcks := tbe.SilentAcks;
750 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
751 assert(out_msg.Acks > 0);
752 out_msg.MessageSize := MessageSizeType:Response_Data;
753 }
754 }
755 }
756
757 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
758 peek(memQueue_in, MemoryMsg) {
759 enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
760 assert(is_valid(tbe));
761 out_msg.PhysicalAddress := address;
762 out_msg.LineAddress := address;
763 out_msg.Type := DMAResponseType:DATA;
764 //
765 // we send the entire data block and rely on the dma controller to
766 // split it up if need be
767 //
768 out_msg.DataBlk := in_msg.DataBlk;
769 out_msg.Destination.add(tbe.DmaRequestor);
770 out_msg.MessageSize := MessageSizeType:Response_Data;
771 }
772 }
773 }
774
775 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
776 peek(triggerQueue_in, TriggerMsg) {
777 enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
778 assert(is_valid(tbe));
779 out_msg.PhysicalAddress := address;
780 out_msg.LineAddress := address;
781 out_msg.Type := DMAResponseType:DATA;
782 //
783 // we send the entire data block and rely on the dma controller to
784 // split it up if need be
785 //
786 out_msg.DataBlk := tbe.DataBlk;
787 out_msg.Destination.add(tbe.DmaRequestor);
788 out_msg.MessageSize := MessageSizeType:Response_Data;
789 }
790 }
791 }
792
793 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
794 enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
795 assert(is_valid(tbe));
796 out_msg.PhysicalAddress := address;
797 out_msg.LineAddress := address;
798 out_msg.Type := DMAResponseType:ACK;
799 out_msg.Destination.add(tbe.DmaRequestor);
800 out_msg.MessageSize := MessageSizeType:Writeback_Control;
801 }
802 }
803
804 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
805 peek(requestQueue_in, RequestMsg) {
806 assert(is_valid(tbe));
807 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
808 }
809 }
810
811 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
812 peek(requestQueue_in, RequestMsg) {
813 assert(is_valid(tbe));
814 if (full_bit_dir_enabled) {
815 fwd_set := cache_entry.Sharers;
816 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
817 if (fwd_set.count() > 0) {
818 tbe.ResponseType := CoherenceResponseType:DATA;
819 } else {
820 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
821 }
822 } else {
823 tbe.ResponseType := CoherenceResponseType:DATA;
824 }
825 }
826 }
827
828 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
829 peek(requestQueue_in, RequestMsg) {
830 assert(is_valid(tbe));
831 tbe.GetSRequestors.add(in_msg.Requestor);
832 }
833 }
834
835 action(r_setSharerBit, "r", desc="We saw other sharers") {
836 assert(is_valid(tbe));
837 tbe.Sharers := true;
838 }
839
840   action(so_setOwnerBit, "so", desc="We saw the owner") {
841 assert(is_valid(tbe));
842 tbe.Sharers := true;
843 tbe.Owned := true;
844 }
845
846 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
847 peek(requestQueue_in, RequestMsg) {
848 enqueue(memQueue_out, MemoryMsg, 1) {
849 out_msg.Addr := address;
850 out_msg.Type := MemoryRequestType:MEMORY_READ;
851 out_msg.Sender := machineID;
852 out_msg.OriginalRequestorMachId := in_msg.Requestor;
853 out_msg.MessageSize := in_msg.MessageSize;
854 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
855 DPRINTF(RubySlicc, "%s\n", out_msg);
856 }
857 }
858 }
859
860 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
861 peek(dmaRequestQueue_in, DMARequestMsg) {
862 enqueue(memQueue_out, MemoryMsg, 1) {
863 out_msg.Addr := address;
864 out_msg.Type := MemoryRequestType:MEMORY_READ;
865 out_msg.Sender := machineID;
866 out_msg.OriginalRequestorMachId := in_msg.Requestor;
867 out_msg.MessageSize := in_msg.MessageSize;
868 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
869 DPRINTF(RubySlicc, "%s\n", out_msg);
870 }
871 }
872 }
873
874 action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
875 assert(is_valid(tbe));
876 if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
877 if (full_bit_dir_enabled) {
878 assert(is_valid(cache_entry));
879 peek(requestQueue_in, RequestMsg) {
880 fwd_set := cache_entry.Sharers;
881 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
882 if (fwd_set.count() > 0) {
883 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
884 out_msg.Addr := address;
885 out_msg.Type := in_msg.Type;
886 out_msg.Requestor := in_msg.Requestor;
887 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
888 out_msg.MessageSize := MessageSizeType:Multicast_Control;
889 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
890 out_msg.ForwardRequestTime := curCycle();
891 assert(tbe.SilentAcks > 0);
892 out_msg.SilentAcks := tbe.SilentAcks;
893 }
894 }
895 }
896 } else {
897 peek(requestQueue_in, RequestMsg) {
898 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
899 out_msg.Addr := address;
900 out_msg.Type := in_msg.Type;
901 out_msg.Requestor := in_msg.Requestor;
902 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
903 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
904 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
905 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
906 out_msg.ForwardRequestTime := curCycle();
907 }
908 }
909 }
910 }
911 }
912
913 action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
914 if (machineCount(MachineType:L1Cache) > 1) {
915 if (full_bit_dir_enabled) {
916 assert(cache_entry.Sharers.count() > 0);
917 peek(requestQueue_in, RequestMsg) {
918 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
919 out_msg.Addr := address;
920 out_msg.Type := CoherenceRequestType:INV;
921 out_msg.Requestor := machineID;
922 out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
923 out_msg.MessageSize := MessageSizeType:Multicast_Control;
924 }
925 }
926 } else {
927 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
928 out_msg.Addr := address;
929 out_msg.Type := CoherenceRequestType:INV;
930 out_msg.Requestor := machineID;
931 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
932 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
933 }
934 }
935 }
936 }
937
938   action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
939 if (machineCount(MachineType:L1Cache) > 1) {
940 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
941 assert(is_valid(cache_entry));
942 out_msg.Addr := address;
943 out_msg.Type := CoherenceRequestType:INV;
944 out_msg.Requestor := machineID;
945 out_msg.Destination.add(cache_entry.Owner);
946 out_msg.MessageSize := MessageSizeType:Request_Control;
947 out_msg.DirectedProbe := true;
948 }
949 }
950 }
951
952 action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
953 if (machineCount(MachineType:L1Cache) > 1) {
954 peek(requestQueue_in, RequestMsg) {
955 if (full_bit_dir_enabled) {
956 fwd_set := cache_entry.Sharers;
957 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
958 if (fwd_set.count() > 0) {
959 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
960 out_msg.Addr := address;
961 out_msg.Type := in_msg.Type;
962 out_msg.Requestor := in_msg.Requestor;
963 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
964 out_msg.MessageSize := MessageSizeType:Multicast_Control;
965 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
966 out_msg.ForwardRequestTime := curCycle();
967 out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
968 out_msg.SilentAcks := out_msg.SilentAcks - 1;
969 }
970 }
971 } else {
972 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
973 out_msg.Addr := address;
974 out_msg.Type := in_msg.Type;
975 out_msg.Requestor := in_msg.Requestor;
976 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
977 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
978 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
979 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
980 out_msg.ForwardRequestTime := curCycle();
981 }
982 }
983 }
984 } else {
985 peek(requestQueue_in, RequestMsg) {
986 enqueue(responseNetwork_out, ResponseMsg, 1) {
987 out_msg.Addr := address;
988 out_msg.Type := CoherenceResponseType:ACK;
989 out_msg.Sender := machineID;
990 out_msg.Destination.add(in_msg.Requestor);
991 out_msg.Dirty := false; // By definition, the block is now clean
992 out_msg.Acks := 0;
993 out_msg.SilentAcks := 0;
994 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
995 out_msg.MessageSize := MessageSizeType:Response_Control;
996 }
997 }
998 }
999 }
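// With a single L1 cache there is nothing to probe, so the else branch above
// skips the forward and instead sends the requestor a control ACK carrying an
// ack count of zero.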
1000
1001 action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
1002 assert(machineCount(MachineType:L1Cache) > 1);
1003 //
1004 // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
1005 // decouple the two.
1006 //
1007 peek(unblockNetwork_in, ResponseMsg) {
1008 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1009 assert(is_valid(tbe));
1010 out_msg.Addr := address;
1011 out_msg.Type := CoherenceRequestType:MERGED_GETS;
1012 out_msg.MergedRequestors := tbe.GetSRequestors;
1013 if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
1014 out_msg.Destination.add(in_msg.CurOwner);
1015 } else {
1016 out_msg.Destination.add(in_msg.Sender);
1017 }
1018 out_msg.MessageSize := MessageSizeType:Request_Control;
1019 out_msg.InitialRequestTime := zero_time();
1020 out_msg.ForwardRequestTime := curCycle();
1021 }
1022 }
1023 }
1024
1025 action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
1026 assert(machineCount(MachineType:L1Cache) > 1);
1027 if (probe_filter_enabled || full_bit_dir_enabled) {
1028 peek(requestQueue_in, RequestMsg) {
1029 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1030 assert(is_valid(cache_entry));
1031 out_msg.Addr := address;
1032 out_msg.Type := in_msg.Type;
1033 out_msg.Requestor := in_msg.Requestor;
1034 out_msg.Destination.add(cache_entry.Owner);
1035 out_msg.MessageSize := MessageSizeType:Request_Control;
1036 out_msg.DirectedProbe := true;
1037 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1038 out_msg.ForwardRequestTime := curCycle();
1039 }
1040 }
1041 } else {
1042 peek(requestQueue_in, RequestMsg) {
1043 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1044 out_msg.Addr := address;
1045 out_msg.Type := in_msg.Type;
1046 out_msg.Requestor := in_msg.Requestor;
1047 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1048 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1049 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1050 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1051 out_msg.ForwardRequestTime := curCycle();
1052 }
1053 }
1054 }
1055 }
1056
1057 action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
1058 if (machineCount(MachineType:L1Cache) > 1) {
1059
1060 if (probe_filter_enabled || full_bit_dir_enabled) {
1061 peek(requestQueue_in, RequestMsg) {
1062 if (in_msg.Requestor != cache_entry.Owner) {
1063 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1064 assert(is_valid(cache_entry));
1065 out_msg.Addr := address;
1066 out_msg.Type := in_msg.Type;
1067 out_msg.Requestor := in_msg.Requestor;
1068 out_msg.Destination.add(cache_entry.Owner);
1069 out_msg.MessageSize := MessageSizeType:Request_Control;
1070 out_msg.DirectedProbe := true;
1071 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1072 out_msg.ForwardRequestTime := curCycle();
1073 }
1074 }
1075 }
1076 } else {
1077 peek(requestQueue_in, RequestMsg) {
1078 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1079 out_msg.Addr := address;
1080 out_msg.Type := in_msg.Type;
1081 out_msg.Requestor := in_msg.Requestor;
1082 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1083 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1084 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1085 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1086 out_msg.ForwardRequestTime := curCycle();
1087 }
1088 }
1089 }
1090 }
1091 }
1092
1093   action(f_forwardWriteFromDma, "fw", desc="Forward DMA write as a GETX to all caches") {
1094 assert(is_valid(tbe));
1095 if (tbe.NumPendingMsgs > 0) {
1096 peek(dmaRequestQueue_in, DMARequestMsg) {
1097 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1098 out_msg.Addr := address;
1099 out_msg.Type := CoherenceRequestType:GETX;
1100 //
1101 // Send to all L1 caches, since the requestor is the memory controller
1102 // itself
1103 //
1104 out_msg.Requestor := machineID;
1105 out_msg.Destination.broadcast(MachineType:L1Cache);
1106 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1107 }
1108 }
1109 }
1110 }
1111
1112   action(f_forwardReadFromDma, "fr", desc="Forward DMA read as a GETS to all caches") {
1113 assert(is_valid(tbe));
1114 if (tbe.NumPendingMsgs > 0) {
1115 peek(dmaRequestQueue_in, DMARequestMsg) {
1116 enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
1117 out_msg.Addr := address;
1118 out_msg.Type := CoherenceRequestType:GETS;
1119 //
1120 // Send to all L1 caches, since the requestor is the memory controller
1121 // itself
1122 //
1123 out_msg.Requestor := machineID;
1124 out_msg.Destination.broadcast(MachineType:L1Cache);
1125 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1126 }
1127 }
1128 }
1129 }
1130
1131 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
1132 requestQueue_in.dequeue();
1133 }
1134
1135 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
1136 peek(unblockNetwork_in, ResponseMsg) {
1137 APPEND_TRANSITION_COMMENT(in_msg.Sender);
1138 }
1139 unblockNetwork_in.dequeue();
1140 }
1141
1142 action(k_wakeUpDependents, "k", desc="wake-up dependents") {
1143 wakeUpBuffers(address);
1144 }
1145
1146 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
1147 memQueue_in.dequeue();
1148 }
1149
1150 action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
1151 triggerQueue_in.dequeue();
1152 }
1153
1154 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
1155 dmaRequestQueue_in.dequeue();
1156 }
1157
1158 action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
1159 peek(dmaRequestQueue_in, DMARequestMsg) {
1160 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1161 }
1162 stall_and_wait(dmaRequestQueue_in, address);
1163 }
1164
1165 action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
1166 peek(memQueue_in, MemoryMsg) {
1167 assert(is_valid(tbe));
1168 if (tbe.CacheDirty == false) {
1169 tbe.DataBlk := in_msg.DataBlk;
1170 }
1171 }
1172 }
1173
1174 action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
1175 peek(responseToDir_in, ResponseMsg) {
1176 assert(is_valid(tbe));
1177 tbe.CacheDirty := true;
1178 tbe.DataBlk := in_msg.DataBlk;
1179 }
1180 }
1181
1182 action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
1183 peek(responseToDir_in, ResponseMsg) {
1184 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1185 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1186 in_msg.Addr, in_msg.DataBlk);
1187 }
1188 }
1189
1190 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
1191 peek(memQueue_in, MemoryMsg) {
1192 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1193 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1194 in_msg.Addr, in_msg.DataBlk);
1195 }
1196 }
1197
1198 action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
1199 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1200 assert(is_valid(tbe));
1201 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1202 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1203 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1204 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1205 }
1206
1207   action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
1208 assert(is_valid(tbe));
1209 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1210 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1211 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1212 }
1213
1214 action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
1215 assert(is_valid(tbe));
1216 assert(tbe.CacheDirty);
1217 }
1218
1219 action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
1220 if (probe_filter_enabled || full_bit_dir_enabled) {
1221 peek(requestQueue_in, RequestMsg) {
1222 assert(is_valid(cache_entry));
1223 assert(cache_entry.Owner != in_msg.Requestor);
1224 }
1225 }
1226 }
1227
1228 action(ans_assertNotSharer, "ans", desc="Assert that request is not a current sharer") {
1229 if (full_bit_dir_enabled) {
1230 peek(requestQueue_in, RequestMsg) {
1231 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
1232 }
1233 }
1234 }
1235
1236 action(rs_removeSharer, "s", desc="remove current sharer") {
1237 if (full_bit_dir_enabled) {
1238 peek(unblockNetwork_in, ResponseMsg) {
1239 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
1240 cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
1241 }
1242 }
1243 }
1244
1245 action(cs_clearSharers, "cs", desc="clear current sharers") {
1246 if (full_bit_dir_enabled) {
1247 peek(requestQueue_in, RequestMsg) {
1248 cache_entry.Sharers.clear();
1249 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
1250 }
1251 }
1252 }
1253
1254 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
1255 peek(unblockNetwork_in, ResponseMsg) {
1256 enqueue(memQueue_out, MemoryMsg, 1) {
1257 assert(in_msg.Dirty);
1258 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
1259 out_msg.Addr := address;
1260 out_msg.Type := MemoryRequestType:MEMORY_WB;
1261 out_msg.DataBlk := in_msg.DataBlk;
1262 DPRINTF(RubySlicc, "%s\n", out_msg);
1263 }
1264 }
1265 }
1266
1267 action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
1268 enqueue(memQueue_out, MemoryMsg, 1) {
1269 assert(is_valid(tbe));
1270 out_msg.Addr := address;
1271 out_msg.Type := MemoryRequestType:MEMORY_WB;
1272 // first, initialize the data blk to the current version of system memory
1273 out_msg.DataBlk := tbe.DataBlk;
1274 // then add the dma write data
1275 out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1276 DPRINTF(RubySlicc, "%s\n", out_msg);
1277 }
1278 }
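// copyPartial merges only the bytes named by the DMA request (offset of
// tbe.PhysicalAddress, length tbe.Len) into the block, so a partial-line DMA
// write does not clobber the rest of the line held in memory.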
1279
1280 action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
1281 peek(unblockNetwork_in, ResponseMsg) {
1282 assert(in_msg.Dirty == false);
1283 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
1284 DPRINTF(RubySlicc, "%s\n", in_msg.DataBlk);
1285 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1286
1287 // NOTE: The following check would not be valid in a real
1288 // implementation. We include the data in the "dataless"
1289 // message so we can assert the clean data matches the datablock
1290 // in memory
1291 assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
1292 }
1293 }
1294
1295   action(z_stallAndWaitRequest, "z", desc="Stall and wait on the request queue") {
1296 peek(requestQueue_in, RequestMsg) {
1297 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1298 }
1299 stall_and_wait(requestQueue_in, address);
1300 }
1301
1302 // TRANSITIONS
1303
1304 // Transitions out of E state
1305 transition(E, GETX, NO_B_W) {
1306 pfa_probeFilterAllocate;
1307 v_allocateTBE;
1308 rx_recordExclusiveInTBE;
1309 saa_setAcksToAllIfPF;
1310 qf_queueMemoryFetchRequest;
1311 fn_forwardRequestIfNecessary;
1312 i_popIncomingRequestQueue;
1313 }
1314
1315 transition(E, GETF, NO_F_W) {
1316 pfa_probeFilterAllocate;
1317 v_allocateTBE;
1318 rx_recordExclusiveInTBE;
1319 saa_setAcksToAllIfPF;
1320 qf_queueMemoryFetchRequest;
1321 fn_forwardRequestIfNecessary;
1322 i_popIncomingRequestQueue;
1323 }
1324
1325 transition(E, GETS, NO_B_W) {
1326 pfa_probeFilterAllocate;
1327 v_allocateTBE;
1328 rx_recordExclusiveInTBE;
1329 saa_setAcksToAllIfPF;
1330 qf_queueMemoryFetchRequest;
1331 fn_forwardRequestIfNecessary;
1332 i_popIncomingRequestQueue;
1333 }
1334
1335 transition(E, DMA_READ, NO_DR_B_W) {
1336 vd_allocateDmaRequestInTBE;
1337 qd_queueMemoryRequestFromDmaRead;
1338 spa_setPendingAcksToZeroIfPF;
1339 f_forwardReadFromDma;
1340 p_popDmaRequestQueue;
1341 }
1342
1343 transition(E, DMA_WRITE, NO_DW_B_W) {
1344 vd_allocateDmaRequestInTBE;
1345 spa_setPendingAcksToZeroIfPF;
1346 sc_signalCompletionIfPF;
1347 f_forwardWriteFromDma;
1348 p_popDmaRequestQueue;
1349 }
1350
1351 // Transitions out of O state
1352 transition(O, GETX, NO_B_W) {
1353 r_setMRU;
1354 v_allocateTBE;
1355 r_recordDataInTBE;
1356 sa_setAcksToOne;
1357 qf_queueMemoryFetchRequest;
1358 fb_forwardRequestBcast;
1359 cs_clearSharers;
1360 i_popIncomingRequestQueue;
1361 }
1362
1363 transition(O, GETF, NO_F_W) {
1364 r_setMRU;
1365 v_allocateTBE;
1366 r_recordDataInTBE;
1367 sa_setAcksToOne;
1368 qf_queueMemoryFetchRequest;
1369 fb_forwardRequestBcast;
1370 cs_clearSharers;
1371 i_popIncomingRequestQueue;
1372 }
1373
1374   // This transition is dumb: if a shared copy exists on-chip, that copy should
1375   // provide the data, not slow off-chip DRAM. The problem is that the current
1376   // caches don't provide data in the S state.
1377 transition(O, GETS, O_B_W) {
1378 r_setMRU;
1379 v_allocateTBE;
1380 r_recordDataInTBE;
1381 saa_setAcksToAllIfPF;
1382 qf_queueMemoryFetchRequest;
1383 fn_forwardRequestIfNecessary;
1384 i_popIncomingRequestQueue;
1385 }
1386
1387 transition(O, DMA_READ, O_DR_B_W) {
1388 vd_allocateDmaRequestInTBE;
1389 spa_setPendingAcksToZeroIfPF;
1390 qd_queueMemoryRequestFromDmaRead;
1391 f_forwardReadFromDma;
1392 p_popDmaRequestQueue;
1393 }
1394
1395 transition(O, Pf_Replacement, O_R) {
1396 v_allocateTBE;
1397 pa_setPendingMsgsToAll;
1398 ia_invalidateAllRequest;
1399 pfd_probeFilterDeallocate;
1400 }
1401
1402 transition(S, Pf_Replacement, S_R) {
1403 v_allocateTBE;
1404 pa_setPendingMsgsToAll;
1405 ia_invalidateAllRequest;
1406 pfd_probeFilterDeallocate;
1407 }
1408
1409 transition(NO, Pf_Replacement, NO_R) {
1410 v_allocateTBE;
1411 po_setPendingMsgsToOne;
1412 io_invalidateOwnerRequest;
1413 pfd_probeFilterDeallocate;
1414 }
1415
1416 transition(NX, Pf_Replacement, NO_R) {
1417 v_allocateTBE;
1418 pa_setPendingMsgsToAll;
1419 ia_invalidateAllRequest;
1420 pfd_probeFilterDeallocate;
1421 }
1422
1423 transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
1424 vd_allocateDmaRequestInTBE;
1425 f_forwardWriteFromDma;
1426 p_popDmaRequestQueue;
1427 }
1428
1429   // Transitions out of NX state
1430 transition(NX, GETX, NO_B) {
1431 r_setMRU;
1432 fb_forwardRequestBcast;
1433 cs_clearSharers;
1434 i_popIncomingRequestQueue;
1435 }
1436
1437 transition(NX, GETF, NO_F) {
1438 r_setMRU;
1439 fb_forwardRequestBcast;
1440 cs_clearSharers;
1441 i_popIncomingRequestQueue;
1442 }
1443
1444 // Transitions out of NO state
1445 transition(NO, GETX, NO_B) {
1446 r_setMRU;
1447 ano_assertNotOwner;
1448 fc_forwardRequestConditionalOwner;
1449 cs_clearSharers;
1450 i_popIncomingRequestQueue;
1451 }
1452
1453 transition(NO, GETF, NO_F) {
1454 r_setMRU;
1455 //ano_assertNotOwner;
1456     nofc_forwardRequestConditionalOwner; // forward request if the requestor is not the owner
1457     cs_clearSharers;
1458     oc_sendBlockAck; // send block ack if the requestor is the owner
1459 i_popIncomingRequestQueue;
1460 }
1461
1462 transition(S, GETX, NO_B) {
1463 r_setMRU;
1464 fb_forwardRequestBcast;
1465 cs_clearSharers;
1466 i_popIncomingRequestQueue;
1467 }
1468
1469 transition(S, GETF, NO_F) {
1470 r_setMRU;
1471 fb_forwardRequestBcast;
1472 cs_clearSharers;
1473 i_popIncomingRequestQueue;
1474 }
1475
1476 transition(S, GETS, NO_B) {
1477 r_setMRU;
1478 ano_assertNotOwner;
1479 fb_forwardRequestBcast;
1480 i_popIncomingRequestQueue;
1481 }
1482
1483 transition(NO, GETS, NO_B) {
1484 r_setMRU;
1485 ano_assertNotOwner;
1486 ans_assertNotSharer;
1487 fc_forwardRequestConditionalOwner;
1488 i_popIncomingRequestQueue;
1489 }
1490
1491 transition(NX, GETS, NO_B) {
1492 r_setMRU;
1493 ano_assertNotOwner;
1494 fc_forwardRequestConditionalOwner;
1495 i_popIncomingRequestQueue;
1496 }
1497
1498 transition({NO, NX, S}, PUT, WB) {
1499 //
1500 // note that the PUT requestor may not be the current owner if an invalidate
1501 // raced with PUT
1502 //
1503 a_sendWriteBackAck;
1504 i_popIncomingRequestQueue;
1505 }
1506
1507 transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
1508 vd_allocateDmaRequestInTBE;
1509 f_forwardReadFromDma;
1510 p_popDmaRequestQueue;
1511 }
1512
1513 // Nack PUT requests when races cause us to believe we own the data
1514 transition({O, E}, PUT) {
1515 b_sendWriteBackNack;
1516 i_popIncomingRequestQueue;
1517 }
1518
1519 // Blocked transient states
1520 transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1521 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1522 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1523 {GETS, GETX, GETF, PUT, Pf_Replacement}) {
1524 z_stallAndWaitRequest;
1525 }
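// z_stallAndWaitRequest and zd_stallAndWaitDMARequest park the blocked message
// in a per-address wait buffer rather than recycling it; k_wakeUpDependents
// (wakeUpBuffers) releases those messages once the in-flight transaction for
// the address completes.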
1526
1527 transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}){
1528 z_stallAndWaitRequest;
1529 }
1530
1531 transition(NO_B, {GETX, GETF}, NO_B_X) {
1532 z_stallAndWaitRequest;
1533 }
1534
1535 transition(NO_B, {PUT, Pf_Replacement}) {
1536 z_stallAndWaitRequest;
1537 }
1538
1539 transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
1540 z_stallAndWaitRequest;
1541 }
1542
1543 transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1544 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1545 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1546 {DMA_READ, DMA_WRITE}) {
1547 zd_stallAndWaitDMARequest;
1548 }
1549
1550 // merge GETS into one response
1551 transition(NO_B, GETS, NO_B_S) {
1552 v_allocateTBE;
1553 rs_recordGetSRequestor;
1554 i_popIncomingRequestQueue;
1555 }
1556
1557 transition(NO_B_S, GETS) {
1558 rs_recordGetSRequestor;
1559 i_popIncomingRequestQueue;
1560 }
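// GETS merging: while an address is blocked, additional GETS requestors are
// accumulated in tbe.GetSRequestors (NO_B -> NO_B_S).  Once the original
// requestor unblocks, fr_forwardMergeReadRequestsToOwner issues a single
// MERGED_GETS to the owner and sp_setPendingMsgsToMergedSharers waits for one
// unblock per merged requestor before the All_Unblocks trigger fires.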
1561
1562 // unblock responses
1563 transition({NO_B, NO_B_X}, UnblockS, NX) {
1564 us_updateSharerIfFBD;
1565 k_wakeUpDependents;
1566 j_popIncomingUnblockQueue;
1567 }
1568
1569 transition({NO_B, NO_B_X}, UnblockM, NO) {
1570 uo_updateOwnerIfPf;
1571 us_updateSharerIfFBD;
1572 k_wakeUpDependents;
1573 j_popIncomingUnblockQueue;
1574 }
1575
1576 transition(NO_B_S, UnblockS, NO_B_S_W) {
1577 us_updateSharerIfFBD;
1578 fr_forwardMergeReadRequestsToOwner;
1579 sp_setPendingMsgsToMergedSharers;
1580 j_popIncomingUnblockQueue;
1581 }
1582
1583 transition(NO_B_S, UnblockM, NO_B_S_W) {
1584 uo_updateOwnerIfPf;
1585 fr_forwardMergeReadRequestsToOwner;
1586 sp_setPendingMsgsToMergedSharers;
1587 j_popIncomingUnblockQueue;
1588 }
1589
1590 transition(NO_B_S_W, UnblockS) {
1591 us_updateSharerIfFBD;
1592 mu_decrementNumberOfUnblocks;
1593 os_checkForMergedGetSCompletion;
1594 j_popIncomingUnblockQueue;
1595 }
1596
1597 transition(NO_B_S_W, All_Unblocks, NX) {
1598 w_deallocateTBE;
1599 k_wakeUpDependents;
1600 g_popTriggerQueue;
1601 }
1602
1603 transition(O_B, UnblockS, O) {
1604 us_updateSharerIfFBD;
1605 k_wakeUpDependents;
1606 j_popIncomingUnblockQueue;
1607 }
1608
1609 transition(O_B, UnblockM, NO) {
1610 us_updateSharerIfFBD;
1611 uo_updateOwnerIfPf;
1612 k_wakeUpDependents;
1613 j_popIncomingUnblockQueue;
1614 }
1615
1616 transition(NO_B_W, Memory_Data, NO_B) {
1617 d_sendData;
1618 w_deallocateTBE;
1619 l_popMemQueue;
1620 }
1621
1622 transition(NO_F_W, Memory_Data, NO_F) {
1623 d_sendData;
1624 w_deallocateTBE;
1625 l_popMemQueue;
1626 }
1627
1628 transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
1629 r_recordMemoryData;
1630 o_checkForCompletion;
1631 l_popMemQueue;
1632 }
1633
1634 transition(O_DR_B_W, Memory_Data, O_DR_B) {
1635 r_recordMemoryData;
1636 dr_sendDmaData;
1637 o_checkForCompletion;
1638 l_popMemQueue;
1639 }
1640
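  //
  // Response gathering: in the collecting states below, each Ack or Data
  // response decrements the expected-message count
  // (m_decrementNumberOfMessages), and o_checkForCompletion enqueues an
  // All_acks_* trigger once every expected response has arrived.
  //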
1641 transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
1642 m_decrementNumberOfMessages;
1643 o_checkForCompletion;
1644 n_popResponseQueue;
1645 }
1646
1647 transition({O_R, S_R, NO_R}, Ack) {
1648 m_decrementNumberOfMessages;
1649 o_checkForCompletion;
1650 n_popResponseQueue;
1651 }
1652
1653 transition(S_R, Data) {
1654 wr_writeResponseDataToMemory;
1655 m_decrementNumberOfMessages;
1656 o_checkForCompletion;
1657 n_popResponseQueue;
1658 }
1659
1660 transition(NO_R, {Data, Exclusive_Data}) {
1661 wr_writeResponseDataToMemory;
1662 m_decrementNumberOfMessages;
1663 o_checkForCompletion;
1664 n_popResponseQueue;
1665 }
1666
1667 transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
1668 w_deallocateTBE;
1669 k_wakeUpDependents;
1670 g_popTriggerQueue;
1671 }
1672
1673 transition({NO_DR_B_W, O_DR_B_W}, Ack) {
1674 m_decrementNumberOfMessages;
1675 n_popResponseQueue;
1676 }
1677
1678 transition(NO_DR_B_W, Shared_Ack) {
1679 m_decrementNumberOfMessages;
1680 r_setSharerBit;
1681 n_popResponseQueue;
1682 }
1683
1684 transition(O_DR_B, Shared_Ack) {
1685 m_decrementNumberOfMessages;
1686 r_setSharerBit;
1687 o_checkForCompletion;
1688 n_popResponseQueue;
1689 }
1690
1691 transition(O_DR_B_W, Shared_Ack) {
1692 m_decrementNumberOfMessages;
1693 r_setSharerBit;
1694 n_popResponseQueue;
1695 }
1696
1697 transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
1698 m_decrementNumberOfMessages;
1699 r_setSharerBit;
1700 o_checkForCompletion;
1701 n_popResponseQueue;
1702 }
1703
1704 transition(NO_DR_B_W, Shared_Data) {
1705 r_recordCacheData;
1706 m_decrementNumberOfMessages;
1707 so_setOwnerBit;
1708 o_checkForCompletion;
1709 n_popResponseQueue;
1710 }
1711
1712 transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
1713 r_recordCacheData;
1714 m_decrementNumberOfMessages;
1715 so_setOwnerBit;
1716 o_checkForCompletion;
1717 n_popResponseQueue;
1718 }
1719
1720 transition(NO_DR_B_W, {Exclusive_Data, Data}) {
1721 r_recordCacheData;
1722 m_decrementNumberOfMessages;
1723 n_popResponseQueue;
1724 }
1725
1726 transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
1727 r_recordCacheData;
1728 m_decrementNumberOfMessages;
1729 o_checkForCompletion;
1730 n_popResponseQueue;
1731 }
1732
1733 transition(NO_DR_B, All_acks_and_owner_data, O) {
1734 //
1735 // Note that the DMA consistency model allows us to send the DMA device
1736 // a response as soon as we receive valid data and prior to receiving
1737 // all acks. However, to simplify the protocol we wait for all acks.
1738 //
1739 dt_sendDmaDataFromTbe;
1740 wdt_writeDataFromTBE;
1741 w_deallocateTBE;
1742 k_wakeUpDependents;
1743 g_popTriggerQueue;
1744 }
1745
1746 transition(NO_DR_B, All_acks_and_shared_data, S) {
1747 //
1748 // Note that the DMA consistency model allows us to send the DMA device
1749 // a response as soon as we receive valid data and prior to receiving
1750 // all acks. However, to simplify the protocol we wait for all acks.
1751 //
1752 dt_sendDmaDataFromTbe;
1753 wdt_writeDataFromTBE;
1754 w_deallocateTBE;
1755 k_wakeUpDependents;
1756 g_popTriggerQueue;
1757 }
1758
1759 transition(NO_DR_B_D, All_acks_and_owner_data, O) {
1760 //
1761 // Note that the DMA consistency model allows us to send the DMA device
1762 // a response as soon as we receive valid data and prior to receiving
1763 // all acks. However, to simplify the protocol we wait for all acks.
1764 //
1765 dt_sendDmaDataFromTbe;
1766 wdt_writeDataFromTBE;
1767 w_deallocateTBE;
1768 k_wakeUpDependents;
1769 g_popTriggerQueue;
1770 }
1771
1772 transition(NO_DR_B_D, All_acks_and_shared_data, S) {
1773 //
1774 // Note that the DMA consistency model allows us to send the DMA device
1775 // a response as soon as we receive valid data and prior to receiving
1776 // all acks. However, to simplify the protocol we wait for all acks.
1777 //
1778 dt_sendDmaDataFromTbe;
1779 wdt_writeDataFromTBE;
1780 w_deallocateTBE;
1781 k_wakeUpDependents;
1782 g_popTriggerQueue;
1783 }
1784
1785 transition(O_DR_B, All_acks_and_owner_data, O) {
1786 wdt_writeDataFromTBE;
1787 w_deallocateTBE;
1788 k_wakeUpDependents;
1789 g_popTriggerQueue;
1790 }
1791
1792 transition(O_DR_B, All_acks_and_data_no_sharers, E) {
1793 wdt_writeDataFromTBE;
1794 w_deallocateTBE;
1795 pfd_probeFilterDeallocate;
1796 k_wakeUpDependents;
1797 g_popTriggerQueue;
1798 }
1799
1800 transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
1801 //
1802 // Note that the DMA consistency model allows us to send the DMA device
1803 // a response as soon as we receive valid data and prior to receiving
1804 // all acks. However, to simplify the protocol we wait for all acks.
1805 //
1806 dt_sendDmaDataFromTbe;
1807 wdt_writeDataFromTBE;
1808 w_deallocateTBE;
1809 ppfd_possibleProbeFilterDeallocate;
1810 k_wakeUpDependents;
1811 g_popTriggerQueue;
1812 }
1813
1814 transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
1815 a_assertCacheData;
1816 //
1817 // Note that the DMA consistency model allows us to send the DMA device
1818 // a response as soon as we receive valid data and prior to receiving
1819 // all acks. However, to simplify the protocol we wait for all acks.
1820 //
1821 dt_sendDmaDataFromTbe;
1822 wdt_writeDataFromTBE;
1823 w_deallocateTBE;
1824 ppfd_possibleProbeFilterDeallocate;
1825 k_wakeUpDependents;
1826 g_popTriggerQueue;
1827 }
1828
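  //
  // DMA write path: once all acks and data have been collected, the DMA write
  // data held in the TBE is applied (dwt_writeDmaDataFromTBE) and the result
  // is queued to memory (ld_queueMemoryDmaWrite); the DMA ack is sent only
  // after the memory controller's Memory_Ack.
  //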
1829 transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
1830 dwt_writeDmaDataFromTBE;
1831 ld_queueMemoryDmaWrite;
1832 g_popTriggerQueue;
1833 }
1834
1835 transition(NO_DW_W, Memory_Ack, E) {
1836 da_sendDmaAck;
1837 w_deallocateTBE;
1838 ppfd_possibleProbeFilterDeallocate;
1839 k_wakeUpDependents;
1840 l_popMemQueue;
1841 }
1842
1843 transition(O_B_W, Memory_Data, O_B) {
1844 d_sendData;
1845 w_deallocateTBE;
1846 l_popMemQueue;
1847 }
1848
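  //
  // If the requestor's unblock arrives before the memory response, the
  // directory moves to NO_W / O_W and, when Memory_Data finally arrives, it
  // is simply popped without being forwarded, presumably because the
  // requestor has already been satisfied by a cache-to-cache transfer.
  //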
1849 transition(NO_B_W, UnblockM, NO_W) {
1850 uo_updateOwnerIfPf;
1851 j_popIncomingUnblockQueue;
1852 }
1853
1854 transition(NO_B_W, UnblockS, NO_W) {
1855 us_updateSharerIfFBD;
1856 j_popIncomingUnblockQueue;
1857 }
1858
1859 transition(O_B_W, UnblockS, O_W) {
1860 us_updateSharerIfFBD;
1861 j_popIncomingUnblockQueue;
1862 }
1863
1864 transition(NO_W, Memory_Data, NO) {
1865 w_deallocateTBE;
1866 k_wakeUpDependents;
1867 l_popMemQueue;
1868 }
1869
1870 transition(O_W, Memory_Data, O) {
1871 w_deallocateTBE;
1872 k_wakeUpDependents;
1873 l_popMemQueue;
1874 }
1875
1876   // WB State Transitions
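  //
  // The WB state is entered after a write-back ack.  The final state depends
  // on what the former owner sends back: dirty data is written to memory
  // (l_queueMemoryWBRequest) before moving to O or E, clean data is only
  // sanity-checked (ll_checkIncomingWriteback), and a plain Unblock indicates
  // the unblocker was not actually the owner.  Exclusive write-backs also
  // deallocate the probe filter entry (pfd_probeFilterDeallocate).
  //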
1877 transition(WB, Writeback_Dirty, WB_O_W) {
1878 rs_removeSharer;
1879 l_queueMemoryWBRequest;
1880 j_popIncomingUnblockQueue;
1881 }
1882
1883 transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
1884 rs_removeSharer;
1885 l_queueMemoryWBRequest;
1886 j_popIncomingUnblockQueue;
1887 }
1888
1889 transition(WB_E_W, Memory_Ack, E) {
1890 l_writeDataToMemory;
1891 pfd_probeFilterDeallocate;
1892 k_wakeUpDependents;
1893 l_popMemQueue;
1894 }
1895
1896 transition(WB_O_W, Memory_Ack, O) {
1897 l_writeDataToMemory;
1898 k_wakeUpDependents;
1899 l_popMemQueue;
1900 }
1901
1902 transition(WB, Writeback_Clean, O) {
1903 ll_checkIncomingWriteback;
1904 rs_removeSharer;
1905 k_wakeUpDependents;
1906 j_popIncomingUnblockQueue;
1907 }
1908
1909 transition(WB, Writeback_Exclusive_Clean, E) {
1910 ll_checkIncomingWriteback;
1911 rs_removeSharer;
1912 pfd_probeFilterDeallocate;
1913 k_wakeUpDependents;
1914 j_popIncomingUnblockQueue;
1915 }
1916
1917 transition(WB, Unblock, NX) {
1918 auno_assertUnblockerNotOwner;
1919 k_wakeUpDependents;
1920 j_popIncomingUnblockQueue;
1921 }
1922
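  //
  // GETF/flush path: NO_F_W forwards the memory data to the GETF requestor
  // and moves to NO_F; the requestor's eventual PUTF is then acknowledged and
  // appears to be handled through the WB state like an ordinary write-back.
  //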
1923 transition(NO_F, PUTF, WB) {
1924 a_sendWriteBackAck;
1925 i_popIncomingRequestQueue;
1926 }
1927
1928   // Possible race between GETF and UnblockM -- it is unclear whether this transition is still needed
1929 transition(NO_F, UnblockM) {
1930 us_updateSharerIfFBD;
1931 uo_updateOwnerIfPf;
1932 j_popIncomingUnblockQueue;
1933 }
1934 }