src/mem/protocol/MOESI_hammer-dir.sm (gem5.git)
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
36 machine(Directory, "AMD Hammer-like protocol")
37 : DirectoryMemory * directory,
38 CacheMemory * probeFilter,
39 MemoryControl * memBuffer,
40 Cycles memory_controller_latency = 2,
41 bool probe_filter_enabled = false,
42 bool full_bit_dir_enabled = false
43 {
44
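  // Configuration note (derived from the parameters and actions below):
  // probe_filter_enabled tracks a single owner per block in the probeFilter
  // cache; full_bit_dir_enabled additionally keeps a full sharer bit-vector
  // per entry. With both disabled, the directory falls back to broadcasting
  // requests to all L1 caches.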
45 MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
46 MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
47 //
48 // For a finite buffered network, note that the DMA response network can use
49 // this relatively low-numbered (lower priority) virtual network only because
50 // the trigger queue decouples cache responses from DMA responses.
51 //
52 MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";
53
54 MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
55 MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
56 MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
57 MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
58
59 // STATES
60 state_declaration(State, desc="Directory states", default="Directory_State_E") {
61 // Base states
62 NX, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in O at Owner";
63 NO, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
64 S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
65 O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
66 E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";
67
68 O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
69 S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
70 NO_R, AccessPermission:Busy, desc="Was Not Owner or Sharer, replacing probe filter entry";
71
72 NO_B, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked";
73 NO_B_X, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
74 NO_B_S, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
75 NO_B_S_W, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
76 O_B, AccessPermission:Busy, "O^B", desc="Owner, Blocked";
77 NO_B_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
78 O_B_W, AccessPermission:Busy, desc="Owner, Blocked, waiting for Dram";
79 NO_W, AccessPermission:Busy, desc="Not Owner, waiting for Dram";
80 O_W, AccessPermission:Busy, desc="Owner, waiting for Dram";
81 NO_DW_B_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram and cache responses";
82 NO_DR_B_W, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for Dram and cache responses";
83 NO_DR_B_D, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
84 NO_DR_B, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses";
85 NO_DW_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram";
86 O_DR_B_W, AccessPermission:Busy, desc="Owner, Dma Read waiting for Dram and cache responses";
87 O_DR_B, AccessPermission:Busy, desc="Owner, Dma Read waiting for cache responses";
88 WB, AccessPermission:Busy, desc="Blocked on a writeback";
89 WB_O_W, AccessPermission:Busy, desc="Blocked on memory write, will go to O";
90 WB_E_W, AccessPermission:Busy, desc="Blocked on memory write, will go to E";
91
92 NO_F, AccessPermission:Busy, desc="Blocked on a flush";
93 NO_F_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
94 }
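  // Transient state naming (from the descriptions above): _B = blocked waiting
  // for the requestor's unblock, _W = waiting for DRAM, _R = replacing a probe
  // filter entry, DR/DW = servicing a DMA read/write, _F = servicing a flush.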
95
96 // Events
97 enumeration(Event, desc="Directory events") {
98 GETX, desc="A GETX arrives";
99 GETS, desc="A GETS arrives";
100 PUT, desc="A PUT arrives";
101 Unblock, desc="An unblock message arrives";
102 UnblockS, desc="An unblock message arrives, requestor has a shared copy";
103 UnblockM, desc="An unblock message arrives, requestor is the new owner";
104 Writeback_Clean, desc="The final part of a PutX (no data)";
105 Writeback_Dirty, desc="The final part of a PutX (data)";
106 Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
107 Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";
108
109 // Probe filter
110 Pf_Replacement, desc="probe filter replacement";
111
112 // DMA requests
113 DMA_READ, desc="A DMA Read memory request";
114 DMA_WRITE, desc="A DMA Write memory request";
115
116 // Memory Controller
117 Memory_Data, desc="Fetched data from memory arrives";
118 Memory_Ack, desc="Writeback Ack from memory arrives";
119
120 // Cache responses required to handle DMA
121 Ack, desc="Received an ack message";
122 Shared_Ack, desc="Received an ack message, responder has a shared copy";
123 Shared_Data, desc="Received a data message, responder has a shared copy";
124 Data, desc="Received a data message, responder had an owner or exclusive copy, they gave it to us";
125 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
126
127 // Triggers
128 All_acks_and_shared_data, desc="Received shared data and message acks";
129 All_acks_and_owner_data, desc="Received owner data and message acks";
130 All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
131 All_Unblocks, desc="Received all unblocks for a merged gets request";
132 GETF, desc="A GETF arrives";
133 PUTF, desc="A PUTF arrives";
134 }
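  // The All_acks_* and All_Unblocks events are not network messages; they are
  // generated locally through the trigger queue once all expected responses or
  // unblocks have been collected (see o_checkForCompletion and
  // os_checkForMergedGetSCompletion below).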
135
136 // TYPES
137
138 // DirectoryEntry
139 structure(Entry, desc="...", interface="AbstractEntry") {
140 State DirectoryState, desc="Directory state";
141 DataBlock DataBlk, desc="data for the block";
142 }
143
144 // ProbeFilterEntry
145 structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
146 State PfState, desc="Probe filter state (mirrors the directory state)";
147 MachineID Owner, desc="Owner node";
148 DataBlock DataBlk, desc="data for the block";
149 Set Sharers, desc="sharing vector for full bit directory";
150 }
151
152 // TBE entries for DMA requests
153 structure(TBE, desc="TBE entries for outstanding DMA requests") {
154 Address PhysicalAddress, desc="physical address";
155 State TBEState, desc="Transient State";
156 CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
157 int Acks, default="0", desc="The number of acks that the waiting response represents";
158 int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
159 DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
160 DataBlock DataBlk, desc="The current view of system memory";
161 int Len, desc="...";
162 MachineID DmaRequestor, desc="DMA requestor";
163 NetDest GetSRequestors, desc="GETS merged requestors";
164 int NumPendingMsgs, desc="Number of pending acks/messages";
165 bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
166 bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
167 bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
168 }
169
170 structure(TBETable, external="yes") {
171 TBE lookup(Address);
172 void allocate(Address);
173 void deallocate(Address);
174 bool isPresent(Address);
175 }
176
177 void set_cache_entry(AbstractCacheEntry b);
178 void unset_cache_entry();
179 void set_tbe(TBE a);
180 void unset_tbe();
181 void wakeUpBuffers(Address a);
182 Cycles curCycle();
183
184 // ** OBJECTS **
185
186 Set fwd_set;
187
188 TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
189
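  // Directory entries are allocated lazily: a lookup that misses allocates a
  // new entry on the spot, so every address always appears present.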
190 Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
191 Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
192
193 if (is_valid(dir_entry)) {
194 return dir_entry;
195 }
196
197 dir_entry := static_cast(Entry, "pointer",
198 directory.allocate(addr, new Entry));
199 return dir_entry;
200 }
201
202 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
203 Entry dir_entry := getDirectoryEntry(addr);
204 if(is_valid(dir_entry)) {
205 return dir_entry.DataBlk;
206 }
207
208 TBE tbe := TBEs[addr];
209 if(is_valid(tbe)) {
210 return tbe.DataBlk;
211 }
212
213 error("Data block missing!");
214 }
215
216 PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
217 if (probe_filter_enabled || full_bit_dir_enabled) {
218 PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
219 return pfEntry;
220 }
221 return OOD;
222 }
223
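  // The transient state lives in the TBE while one is allocated; otherwise the
  // directory entry holds the state. When the probe filter or full-bit
  // directory is enabled, its entry's state must mirror the directory state.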
224 State getState(TBE tbe, PfEntry pf_entry, Address addr) {
225 if (is_valid(tbe)) {
226 return tbe.TBEState;
227 } else {
228 if (probe_filter_enabled || full_bit_dir_enabled) {
229 if (is_valid(pf_entry)) {
230 assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
231 }
232 }
233 return getDirectoryEntry(addr).DirectoryState;
234 }
235 }
236
237 void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
238 if (is_valid(tbe)) {
239 tbe.TBEState := state;
240 }
241 if (probe_filter_enabled || full_bit_dir_enabled) {
242 if (is_valid(pf_entry)) {
243 pf_entry.PfState := state;
244 }
245 if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
246 assert(is_valid(pf_entry));
247 }
248 if (state == State:E) {
249 assert(is_valid(pf_entry) == false);
250 }
251 }
252 if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
253 state == State:O) {
254 assert(is_valid(tbe) == false);
255 }
256 getDirectoryEntry(addr).DirectoryState := state;
257 }
258
259 AccessPermission getAccessPermission(Address addr) {
260 TBE tbe := TBEs[addr];
261 if(is_valid(tbe)) {
262 return Directory_State_to_permission(tbe.TBEState);
263 }
264
265 if(directory.isPresent(addr)) {
266 return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
267 }
268
269 return AccessPermission:NotPresent;
270 }
271
272 void setAccessPermission(PfEntry pf_entry, Address addr, State state) {
273 getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
274 }
275
276 Event cache_request_to_event(CoherenceRequestType type) {
277 if (type == CoherenceRequestType:GETS) {
278 return Event:GETS;
279 } else if (type == CoherenceRequestType:GETX) {
280 return Event:GETX;
281 } else if (type == CoherenceRequestType:GETF) {
282 return Event:GETF;
283 } else {
284 error("Invalid CoherenceRequestType");
285 }
286 }
287
288 MessageBuffer triggerQueue, ordered="true";
289
290 // ** OUT_PORTS **
291 out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
292 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
293 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
294 out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
295 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
296
297 //
298 // Memory buffer for memory controller to DIMM communication
299 //
300 out_port(memQueue_out, MemoryMsg, memBuffer);
301
302 // ** IN_PORTS **
303
304 // Trigger Queue
305 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
306 if (triggerQueue_in.isReady()) {
307 peek(triggerQueue_in, TriggerMsg) {
308 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
309 TBE tbe := TBEs[in_msg.Address];
310 if (in_msg.Type == TriggerType:ALL_ACKS) {
311 trigger(Event:All_acks_and_owner_data, in_msg.Address,
312 pf_entry, tbe);
313 } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
314 trigger(Event:All_acks_and_shared_data, in_msg.Address,
315 pf_entry, tbe);
316 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
317 trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
318 pf_entry, tbe);
319 } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
320 trigger(Event:All_Unblocks, in_msg.Address,
321 pf_entry, tbe);
322 } else {
323 error("Unexpected message");
324 }
325 }
326 }
327 }
328
329 in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
330 if (unblockNetwork_in.isReady()) {
331 peek(unblockNetwork_in, ResponseMsg) {
332 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
333 TBE tbe := TBEs[in_msg.Address];
334 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
335 trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
336 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
337 trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
338 } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
339 trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
340 } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
341 trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
342 } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
343 trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
344 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
345 trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
346 pf_entry, tbe);
347 } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
348 trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
349 pf_entry, tbe);
350 } else {
351 error("Invalid message");
352 }
353 }
354 }
355 }
356
357 // Response Network
358 in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
359 if (responseToDir_in.isReady()) {
360 peek(responseToDir_in, ResponseMsg) {
361 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
362 TBE tbe := TBEs[in_msg.Address];
363 if (in_msg.Type == CoherenceResponseType:ACK) {
364 trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
365 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
366 trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
367 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
368 trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
369 } else if (in_msg.Type == CoherenceResponseType:DATA) {
370 trigger(Event:Data, in_msg.Address, pf_entry, tbe);
371 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
372 trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
373 } else {
374 error("Unexpected message");
375 }
376 }
377 }
378 }
379
380 // off-chip memory request/response is done
381 in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
382 if (memQueue_in.isReady()) {
383 peek(memQueue_in, MemoryMsg) {
384 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
385 TBE tbe := TBEs[in_msg.Address];
386 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
387 trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
388 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
389 trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
390 } else {
391 DPRINTF(RubySlicc, "%d\n", in_msg.Type);
392 error("Invalid message");
393 }
394 }
395 }
396 }
397
398 in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
399 if (requestQueue_in.isReady()) {
400 peek(requestQueue_in, RequestMsg) {
401 PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
402 TBE tbe := TBEs[in_msg.Address];
403 if (in_msg.Type == CoherenceRequestType:PUT) {
404 trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
405 } else if (in_msg.Type == CoherenceRequestType:PUTF) {
406 trigger(Event:PUTF, in_msg.Address, pf_entry, tbe);
407 } else {
408 if (probe_filter_enabled || full_bit_dir_enabled) {
409 if (is_valid(pf_entry)) {
410 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
411 pf_entry, tbe);
412 } else {
413 if (probeFilter.cacheAvail(in_msg.Address)) {
414 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
415 pf_entry, tbe);
416 } else {
417 trigger(Event:Pf_Replacement,
418 probeFilter.cacheProbe(in_msg.Address),
419 getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
420 TBEs[probeFilter.cacheProbe(in_msg.Address)]);
421 }
422 }
423 } else {
424 trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
425 pf_entry, tbe);
426 }
427 }
428 }
429 }
430 }
431
432 in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
433 if (dmaRequestQueue_in.isReady()) {
434 peek(dmaRequestQueue_in, DMARequestMsg) {
435 PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
436 TBE tbe := TBEs[in_msg.LineAddress];
437 if (in_msg.Type == DMARequestType:READ) {
438 trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
439 } else if (in_msg.Type == DMARequestType:WRITE) {
440 trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
441 } else {
442 error("Invalid message");
443 }
444 }
445 }
446 }
447
448 // Actions
449
450 action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
451 if (probe_filter_enabled || full_bit_dir_enabled) {
452 assert(is_valid(cache_entry));
453 probeFilter.setMRU(address);
454 }
455 }
456
457 action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
458 if (probe_filter_enabled || full_bit_dir_enabled) {
459 assert(is_valid(cache_entry));
460 peek(unblockNetwork_in, ResponseMsg) {
461 assert(cache_entry.Owner != in_msg.Sender);
462 if (full_bit_dir_enabled) {
463 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
464 }
465 }
466 }
467 }
468
469 action(uo_updateOwnerIfPf, "uo", desc="update owner") {
470 if (probe_filter_enabled || full_bit_dir_enabled) {
471 assert(is_valid(cache_entry));
472 peek(unblockNetwork_in, ResponseMsg) {
473 cache_entry.Owner := in_msg.Sender;
474 if (full_bit_dir_enabled) {
475 cache_entry.Sharers.clear();
476 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
477 APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
478 DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
479 }
480 }
481 }
482 }
483
484 action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
485 if (full_bit_dir_enabled) {
486 assert(probeFilter.isTagPresent(address));
487 peek(unblockNetwork_in, ResponseMsg) {
488 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
489 }
490 }
491 }
492
493 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
494 peek(requestQueue_in, RequestMsg) {
495 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
496 out_msg.Address := address;
497 out_msg.Type := CoherenceRequestType:WB_ACK;
498 out_msg.Requestor := in_msg.Requestor;
499 out_msg.Destination.add(in_msg.Requestor);
500 out_msg.MessageSize := MessageSizeType:Writeback_Control;
501 }
502 }
503 }
504
505 action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
506 peek(requestQueue_in, RequestMsg) {
507 if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
508 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
509 out_msg.Address := address;
510 out_msg.Type := CoherenceRequestType:BLOCK_ACK;
511 out_msg.Requestor := in_msg.Requestor;
512 out_msg.Destination.add(in_msg.Requestor);
513 out_msg.MessageSize := MessageSizeType:Writeback_Control;
514 }
515 }
516 }
517 }
518
519 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
520 peek(requestQueue_in, RequestMsg) {
521 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
522 out_msg.Address := address;
523 out_msg.Type := CoherenceRequestType:WB_NACK;
524 out_msg.Requestor := in_msg.Requestor;
525 out_msg.Destination.add(in_msg.Requestor);
526 out_msg.MessageSize := MessageSizeType:Writeback_Control;
527 }
528 }
529 }
530
531 action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
532 if (probe_filter_enabled || full_bit_dir_enabled) {
533 peek(requestQueue_in, RequestMsg) {
534 set_cache_entry(probeFilter.allocate(address, new PfEntry));
535 cache_entry.Owner := in_msg.Requestor;
536 cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
537 }
538 }
539 }
540
541 action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
542 if (probe_filter_enabled || full_bit_dir_enabled) {
543 probeFilter.deallocate(address);
544 unset_cache_entry();
545 }
546 }
547
548 action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry if one exists") {
549 if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
550 probeFilter.deallocate(address);
551 unset_cache_entry();
552 }
553 }
554
555 action(v_allocateTBE, "v", desc="Allocate TBE") {
556 check_allocate(TBEs);
557 peek(requestQueue_in, RequestMsg) {
558 TBEs.allocate(address);
559 set_tbe(TBEs[address]);
560 tbe.PhysicalAddress := address;
561 tbe.ResponseType := CoherenceResponseType:NULL;
562 }
563 }
564
565 action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
566 check_allocate(TBEs);
567 peek(dmaRequestQueue_in, DMARequestMsg) {
568 TBEs.allocate(address);
569 set_tbe(TBEs[address]);
570 tbe.DmaDataBlk := in_msg.DataBlk;
571 tbe.PhysicalAddress := in_msg.PhysicalAddress;
572 tbe.Len := in_msg.Len;
573 tbe.DmaRequestor := in_msg.Requestor;
574 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
575 //
576 // One ack for each last-level cache
577 //
578 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
579 //
580 // Assume initially that the caches store a clean copy and that memory
581 // will provide the data
582 //
583 tbe.CacheDirty := false;
584 }
585 }
586
587 action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
588 assert(is_valid(tbe));
589 if (full_bit_dir_enabled) {
590 assert(is_valid(cache_entry));
591 tbe.NumPendingMsgs := cache_entry.Sharers.count();
592 } else {
593 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
594 }
595 }
596
597 action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
598 assert(is_valid(tbe));
599 tbe.NumPendingMsgs := 1;
600 }
601
602 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
603 TBEs.deallocate(address);
604 unset_tbe();
605 }
606
607 action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
608 assert(is_valid(tbe));
609 peek(requestQueue_in, RequestMsg) {
610 if (full_bit_dir_enabled) {
611 assert(is_valid(cache_entry));
612 //
613 // If we are using the full-bit directory and no sharers exist beyond
614 // the requestor, then we must set the ack number to all, not one
615 //
616 fwd_set := cache_entry.Sharers;
617 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
618 if (fwd_set.count() > 0) {
619 tbe.Acks := 1;
620 tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
621 tbe.SilentAcks := tbe.SilentAcks - 1;
622 } else {
623 tbe.Acks := machineCount(MachineType:L1Cache);
624 tbe.SilentAcks := 0;
625 }
626 } else {
627 tbe.Acks := 1;
628 }
629 }
630 }
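  // SilentAcks records how many caches the full-bit directory skipped probing
  // (they are known not to be sharers); the count is forwarded so the requestor
  // can account for responses that will never arrive.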
631
632 action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
633 assert(is_valid(tbe));
634 if (probe_filter_enabled || full_bit_dir_enabled) {
635 tbe.Acks := machineCount(MachineType:L1Cache);
636 tbe.SilentAcks := 0;
637 } else {
638 tbe.Acks := 1;
639 }
640 }
641
642 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
643 peek(responseToDir_in, ResponseMsg) {
644 assert(is_valid(tbe));
645 assert(in_msg.Acks > 0);
646 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
647 //
648 // Note that cache data responses will have an ack count of 2. However,
649 // directory DMA requests must wait for acks from all LLC caches, so
650 // only decrement by 1.
651 //
652 if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
653 (in_msg.Type == CoherenceResponseType:DATA) ||
654 (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
655 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
656 } else {
657 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
658 }
659 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
660 }
661 }
662
663 action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
664 peek(unblockNetwork_in, ResponseMsg) {
665 assert(is_valid(tbe));
666 assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
667 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
668 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
669 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
670 }
671 }
672
673 action(n_popResponseQueue, "n", desc="Pop response queue") {
674 responseToDir_in.dequeue();
675 }
676
677 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
678 assert(is_valid(tbe));
679 if (tbe.NumPendingMsgs == 0) {
680 enqueue(triggerQueue_out, TriggerMsg) {
681 out_msg.Address := address;
682 if (tbe.Sharers) {
683 if (tbe.Owned) {
684 out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
685 } else {
686 out_msg.Type := TriggerType:ALL_ACKS;
687 }
688 } else {
689 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
690 }
691 }
692 }
693 }
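  // Note the trigger-to-event mapping in triggerQueue_in: ALL_ACKS fires
  // All_acks_and_owner_data, ALL_ACKS_OWNER_EXISTS fires
  // All_acks_and_shared_data, and ALL_ACKS_NO_SHARERS fires
  // All_acks_and_data_no_sharers.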
694
695 action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
696 assert(is_valid(tbe));
697 if (tbe.NumPendingMsgs == 0) {
698 enqueue(triggerQueue_out, TriggerMsg) {
699 out_msg.Address := address;
700 out_msg.Type := TriggerType:ALL_UNBLOCKS;
701 }
702 }
703 }
704
705 action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
706 assert(is_valid(tbe));
707 tbe.NumPendingMsgs := tbe.GetSRequestors.count();
708 }
709
710 action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
711 if (probe_filter_enabled || full_bit_dir_enabled) {
712 assert(is_valid(tbe));
713 tbe.NumPendingMsgs := 0;
714 }
715 }
716
717 action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
718 assert(is_valid(tbe));
719 if (tbe.NumPendingMsgs == 0) {
720 assert(probe_filter_enabled || full_bit_dir_enabled);
721 enqueue(triggerQueue_out, TriggerMsg) {
722 out_msg.Address := address;
723 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
724 }
725 }
726 }
727
728 action(d_sendData, "d", desc="Send data to requestor") {
729 peek(memQueue_in, MemoryMsg) {
730 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
731 assert(is_valid(tbe));
732 out_msg.Address := address;
733 out_msg.Type := tbe.ResponseType;
734 out_msg.Sender := machineID;
735 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
736 out_msg.DataBlk := in_msg.DataBlk;
737 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
738 out_msg.Dirty := false; // By definition, the block is now clean
739 out_msg.Acks := tbe.Acks;
740 out_msg.SilentAcks := tbe.SilentAcks;
741 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
742 assert(out_msg.Acks > 0);
743 out_msg.MessageSize := MessageSizeType:Response_Data;
744 }
745 }
746 }
747
748 action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
749 peek(memQueue_in, MemoryMsg) {
750 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
751 assert(is_valid(tbe));
752 out_msg.PhysicalAddress := address;
753 out_msg.LineAddress := address;
754 out_msg.Type := DMAResponseType:DATA;
755 //
756 // we send the entire data block and rely on the dma controller to
757 // split it up if need be
758 //
759 out_msg.DataBlk := in_msg.DataBlk;
760 out_msg.Destination.add(tbe.DmaRequestor);
761 out_msg.MessageSize := MessageSizeType:Response_Data;
762 }
763 }
764 }
765
766 action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
767 peek(triggerQueue_in, TriggerMsg) {
768 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
769 assert(is_valid(tbe));
770 out_msg.PhysicalAddress := address;
771 out_msg.LineAddress := address;
772 out_msg.Type := DMAResponseType:DATA;
773 //
774 // we send the entire data block and rely on the dma controller to
775 // split it up if need be
776 //
777 out_msg.DataBlk := tbe.DataBlk;
778 out_msg.Destination.add(tbe.DmaRequestor);
779 out_msg.MessageSize := MessageSizeType:Response_Data;
780 }
781 }
782 }
783
784 action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
785 enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
786 assert(is_valid(tbe));
787 out_msg.PhysicalAddress := address;
788 out_msg.LineAddress := address;
789 out_msg.Type := DMAResponseType:ACK;
790 out_msg.Destination.add(tbe.DmaRequestor);
791 out_msg.MessageSize := MessageSizeType:Writeback_Control;
792 }
793 }
794
795 action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
796 peek(requestQueue_in, RequestMsg) {
797 assert(is_valid(tbe));
798 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
799 }
800 }
801
802 action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
803 peek(requestQueue_in, RequestMsg) {
804 assert(is_valid(tbe));
805 if (full_bit_dir_enabled) {
806 fwd_set := cache_entry.Sharers;
807 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
808 if (fwd_set.count() > 0) {
809 tbe.ResponseType := CoherenceResponseType:DATA;
810 } else {
811 tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
812 }
813 } else {
814 tbe.ResponseType := CoherenceResponseType:DATA;
815 }
816 }
817 }
818
819 action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
820 peek(requestQueue_in, RequestMsg) {
821 assert(is_valid(tbe));
822 tbe.GetSRequestors.add(in_msg.Requestor);
823 }
824 }
825
826 action(r_setSharerBit, "r", desc="We saw other sharers") {
827 assert(is_valid(tbe));
828 tbe.Sharers := true;
829 }
830
831 action(so_setOwnerBit, "so", desc="We saw the owner") {
832 assert(is_valid(tbe));
833 tbe.Sharers := true;
834 tbe.Owned := true;
835 }
836
837 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
838 peek(requestQueue_in, RequestMsg) {
839 enqueue(memQueue_out, MemoryMsg, latency="1") {
840 out_msg.Address := address;
841 out_msg.Type := MemoryRequestType:MEMORY_READ;
842 out_msg.Sender := machineID;
843 out_msg.OriginalRequestorMachId := in_msg.Requestor;
844 out_msg.MessageSize := in_msg.MessageSize;
845 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
846 DPRINTF(RubySlicc, "%s\n", out_msg);
847 }
848 }
849 }
850
851 action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
852 peek(dmaRequestQueue_in, DMARequestMsg) {
853 enqueue(memQueue_out, MemoryMsg, latency="1") {
854 out_msg.Address := address;
855 out_msg.Type := MemoryRequestType:MEMORY_READ;
856 out_msg.Sender := machineID;
857 out_msg.OriginalRequestorMachId := in_msg.Requestor;
858 out_msg.MessageSize := in_msg.MessageSize;
859 out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
860 DPRINTF(RubySlicc, "%s\n", out_msg);
861 }
862 }
863 }
864
865 action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
866 assert(is_valid(tbe));
867 if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
868 if (full_bit_dir_enabled) {
869 assert(is_valid(cache_entry));
870 peek(requestQueue_in, RequestMsg) {
871 fwd_set := cache_entry.Sharers;
872 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
873 if (fwd_set.count() > 0) {
874 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
875 out_msg.Address := address;
876 out_msg.Type := in_msg.Type;
877 out_msg.Requestor := in_msg.Requestor;
878 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
879 out_msg.MessageSize := MessageSizeType:Multicast_Control;
880 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
881 out_msg.ForwardRequestTime := curCycle();
882 assert(tbe.SilentAcks > 0);
883 out_msg.SilentAcks := tbe.SilentAcks;
884 }
885 }
886 }
887 } else {
888 peek(requestQueue_in, RequestMsg) {
889 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
890 out_msg.Address := address;
891 out_msg.Type := in_msg.Type;
892 out_msg.Requestor := in_msg.Requestor;
893 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
894 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
895 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
896 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
897 out_msg.ForwardRequestTime := curCycle();
898 }
899 }
900 }
901 }
902 }
903
904 action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
905 if (machineCount(MachineType:L1Cache) > 1) {
906 if (full_bit_dir_enabled) {
907 assert(cache_entry.Sharers.count() > 0);
908 peek(requestQueue_in, RequestMsg) {
909 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
910 out_msg.Address := address;
911 out_msg.Type := CoherenceRequestType:INV;
912 out_msg.Requestor := machineID;
913 out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
914 out_msg.MessageSize := MessageSizeType:Multicast_Control;
915 }
916 }
917 } else {
918 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
919 out_msg.Address := address;
920 out_msg.Type := CoherenceRequestType:INV;
921 out_msg.Requestor := machineID;
922 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
923 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
924 }
925 }
926 }
927 }
928
929 action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
930 if (machineCount(MachineType:L1Cache) > 1) {
931 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
932 assert(is_valid(cache_entry));
933 out_msg.Address := address;
934 out_msg.Type := CoherenceRequestType:INV;
935 out_msg.Requestor := machineID;
936 out_msg.Destination.add(cache_entry.Owner);
937 out_msg.MessageSize := MessageSizeType:Request_Control;
938 out_msg.DirectedProbe := true;
939 }
940 }
941 }
942
943 action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
944 if (machineCount(MachineType:L1Cache) > 1) {
945 peek(requestQueue_in, RequestMsg) {
946 if (full_bit_dir_enabled) {
947 fwd_set := cache_entry.Sharers;
948 fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
949 if (fwd_set.count() > 0) {
950 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
951 out_msg.Address := address;
952 out_msg.Type := in_msg.Type;
953 out_msg.Requestor := in_msg.Requestor;
954 out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
955 out_msg.MessageSize := MessageSizeType:Multicast_Control;
956 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
957 out_msg.ForwardRequestTime := curCycle();
958 out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
959 out_msg.SilentAcks := out_msg.SilentAcks - 1;
960 }
961 }
962 } else {
963 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
964 out_msg.Address := address;
965 out_msg.Type := in_msg.Type;
966 out_msg.Requestor := in_msg.Requestor;
967 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
968 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
969 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
970 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
971 out_msg.ForwardRequestTime := curCycle();
972 }
973 }
974 }
975 } else {
976 peek(requestQueue_in, RequestMsg) {
977 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
978 out_msg.Address := address;
979 out_msg.Type := CoherenceResponseType:ACK;
980 out_msg.Sender := machineID;
981 out_msg.Destination.add(in_msg.Requestor);
982 out_msg.Dirty := false; // By definition, the block is now clean
983 out_msg.Acks := 0;
984 out_msg.SilentAcks := 0;
985 DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
986 out_msg.MessageSize := MessageSizeType:Response_Control;
987 }
988 }
989 }
990 }
991
992 action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
993 assert(machineCount(MachineType:L1Cache) > 1);
994 //
995 // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
996 // decouple the two.
997 //
998 peek(unblockNetwork_in, ResponseMsg) {
999 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1000 assert(is_valid(tbe));
1001 out_msg.Address := address;
1002 out_msg.Type := CoherenceRequestType:MERGED_GETS;
1003 out_msg.MergedRequestors := tbe.GetSRequestors;
1004 if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
1005 out_msg.Destination.add(in_msg.CurOwner);
1006 } else {
1007 out_msg.Destination.add(in_msg.Sender);
1008 }
1009 out_msg.MessageSize := MessageSizeType:Request_Control;
1010 out_msg.InitialRequestTime := zero_time();
1011 out_msg.ForwardRequestTime := curCycle();
1012 }
1013 }
1014 }
1015
1016 action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
1017 assert(machineCount(MachineType:L1Cache) > 1);
1018 if (probe_filter_enabled || full_bit_dir_enabled) {
1019 peek(requestQueue_in, RequestMsg) {
1020 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1021 assert(is_valid(cache_entry));
1022 out_msg.Address := address;
1023 out_msg.Type := in_msg.Type;
1024 out_msg.Requestor := in_msg.Requestor;
1025 out_msg.Destination.add(cache_entry.Owner);
1026 out_msg.MessageSize := MessageSizeType:Request_Control;
1027 out_msg.DirectedProbe := true;
1028 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1029 out_msg.ForwardRequestTime := curCycle();
1030 }
1031 }
1032 } else {
1033 peek(requestQueue_in, RequestMsg) {
1034 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1035 out_msg.Address := address;
1036 out_msg.Type := in_msg.Type;
1037 out_msg.Requestor := in_msg.Requestor;
1038 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1039 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1040 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1041 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1042 out_msg.ForwardRequestTime := curCycle();
1043 }
1044 }
1045 }
1046 }
1047
1048 action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
1049 if (machineCount(MachineType:L1Cache) > 1) {
1050
1051 if (probe_filter_enabled || full_bit_dir_enabled) {
1052 peek(requestQueue_in, RequestMsg) {
1053 if (in_msg.Requestor != cache_entry.Owner) {
1054 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1055 assert(is_valid(cache_entry));
1056 out_msg.Address := address;
1057 out_msg.Type := in_msg.Type;
1058 out_msg.Requestor := in_msg.Requestor;
1059 out_msg.Destination.add(cache_entry.Owner);
1060 out_msg.MessageSize := MessageSizeType:Request_Control;
1061 out_msg.DirectedProbe := true;
1062 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1063 out_msg.ForwardRequestTime := curCycle();
1064 }
1065 }
1066 }
1067 } else {
1068 peek(requestQueue_in, RequestMsg) {
1069 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1070 out_msg.Address := address;
1071 out_msg.Type := in_msg.Type;
1072 out_msg.Requestor := in_msg.Requestor;
1073 out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
1074 out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
1075 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1076 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1077 out_msg.ForwardRequestTime := curCycle();
1078 }
1079 }
1080 }
1081 }
1082 }
1083
1084 action(f_forwardWriteFromDma, "fw", desc="Forward DMA write as a GETX broadcast") {
1085 assert(is_valid(tbe));
1086 if (tbe.NumPendingMsgs > 0) {
1087 peek(dmaRequestQueue_in, DMARequestMsg) {
1088 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1089 out_msg.Address := address;
1090 out_msg.Type := CoherenceRequestType:GETX;
1091 //
1092 // Send to all L1 caches, since the requestor is the memory controller
1093 // itself
1094 //
1095 out_msg.Requestor := machineID;
1096 out_msg.Destination.broadcast(MachineType:L1Cache);
1097 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1098 }
1099 }
1100 }
1101 }
1102
1103 action(f_forwardReadFromDma, "fr", desc="Forward DMA read as a GETS broadcast") {
1104 assert(is_valid(tbe));
1105 if (tbe.NumPendingMsgs > 0) {
1106 peek(dmaRequestQueue_in, DMARequestMsg) {
1107 enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
1108 out_msg.Address := address;
1109 out_msg.Type := CoherenceRequestType:GETS;
1110 //
1111 // Send to all L1 caches, since the requestor is the memory controller
1112 // itself
1113 //
1114 out_msg.Requestor := machineID;
1115 out_msg.Destination.broadcast(MachineType:L1Cache);
1116 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
1117 }
1118 }
1119 }
1120 }
1121
1122 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
1123 requestQueue_in.dequeue();
1124 }
1125
1126 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
1127 peek(unblockNetwork_in, ResponseMsg) {
1128 APPEND_TRANSITION_COMMENT(in_msg.Sender);
1129 }
1130 unblockNetwork_in.dequeue();
1131 }
1132
1133 action(k_wakeUpDependents, "k", desc="wake-up dependents") {
1134 wakeUpBuffers(address);
1135 }
1136
1137 action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
1138 memQueue_in.dequeue();
1139 }
1140
1141 action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
1142 triggerQueue_in.dequeue();
1143 }
1144
1145 action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
1146 dmaRequestQueue_in.dequeue();
1147 }
1148
1149 action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
1150 peek(dmaRequestQueue_in, DMARequestMsg) {
1151 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1152 }
1153 stall_and_wait(dmaRequestQueue_in, address);
1154 }
1155
1156 action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
1157 peek(memQueue_in, MemoryMsg) {
1158 assert(is_valid(tbe));
1159 if (tbe.CacheDirty == false) {
1160 tbe.DataBlk := in_msg.DataBlk;
1161 }
1162 }
1163 }
1164
1165 action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
1166 peek(responseToDir_in, ResponseMsg) {
1167 assert(is_valid(tbe));
1168 tbe.CacheDirty := true;
1169 tbe.DataBlk := in_msg.DataBlk;
1170 }
1171 }
1172
1173 action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
1174 peek(responseToDir_in, ResponseMsg) {
1175 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1176 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1177 in_msg.Address, in_msg.DataBlk);
1178 }
1179 }
1180
1181 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
1182 peek(memQueue_in, MemoryMsg) {
1183 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1184 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
1185 in_msg.Address, in_msg.DataBlk);
1186 }
1187 }
1188
1189 action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
1190 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1191 assert(is_valid(tbe));
1192 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1193 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1194 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1195 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1196 }
1197
1198 action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
1199 assert(is_valid(tbe));
1200 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1201 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1202 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1203 }
1204
1205 action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
1206 assert(is_valid(tbe));
1207 assert(tbe.CacheDirty);
1208 }
1209
1210 action(ano_assertNotOwner, "ano", desc="Assert that request is not current owner") {
1211 if (probe_filter_enabled || full_bit_dir_enabled) {
1212 peek(requestQueue_in, RequestMsg) {
1213 assert(is_valid(cache_entry));
1214 assert(cache_entry.Owner != in_msg.Requestor);
1215 }
1216 }
1217 }
1218
1219 action(ans_assertNotSharer, "ans", desc="Assert that request is not a current sharer") {
1220 if (full_bit_dir_enabled) {
1221 peek(requestQueue_in, RequestMsg) {
1222 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
1223 }
1224 }
1225 }
1226
1227 action(rs_removeSharer, "s", desc="remove current sharer") {
1228 if (full_bit_dir_enabled) {
1229 peek(unblockNetwork_in, ResponseMsg) {
1230 assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
1231 cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
1232 }
1233 }
1234 }
1235
1236 action(cs_clearSharers, "cs", desc="clear current sharers") {
1237 if (full_bit_dir_enabled) {
1238 peek(requestQueue_in, RequestMsg) {
1239 cache_entry.Sharers.clear();
1240 cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
1241 }
1242 }
1243 }
1244
1245 action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
1246 peek(unblockNetwork_in, ResponseMsg) {
1247 enqueue(memQueue_out, MemoryMsg, latency="1") {
1248 assert(in_msg.Dirty);
1249 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
1250 out_msg.Address := address;
1251 out_msg.Type := MemoryRequestType:MEMORY_WB;
1252 out_msg.DataBlk := in_msg.DataBlk;
1253 DPRINTF(RubySlicc, "%s\n", out_msg);
1254 }
1255 }
1256 }
1257
1258 action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
1259 enqueue(memQueue_out, MemoryMsg, latency="1") {
1260 assert(is_valid(tbe));
1261 out_msg.Address := address;
1262 out_msg.Type := MemoryRequestType:MEMORY_WB;
1263 // first, initialize the data blk to the current version of system memory
1264 out_msg.DataBlk := tbe.DataBlk;
1265 // then add the dma write data
1266 out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
1267 DPRINTF(RubySlicc, "%s\n", out_msg);
1268 }
1269 }
1270
1271 action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
1272 peek(unblockNetwork_in, ResponseMsg) {
1273 assert(in_msg.Dirty == false);
1274 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
1275 DPRINTF(RubySlicc, "%s\n", in_msg.DataBlk);
1276 DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
1277
1278 // NOTE: The following check would not be valid in a real
1279 // implementation. We include the data in the "dataless"
1280 // message so we can assert the clean data matches the datablock
1281 // in memory
1282 assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
1283 }
1284 }
1285
1286 action(z_stallAndWaitRequest, "z", desc="Recycle the request queue") {
1287 peek(requestQueue_in, RequestMsg) {
1288 APPEND_TRANSITION_COMMENT(in_msg.Requestor);
1289 }
1290 stall_and_wait(requestQueue_in, address);
1291 }
1292
1293 // TRANSITIONS
1294
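  // Each transition(initial_state(s), event(s)[, next_state]) executes its
  // actions in order; when no next_state is given, the block stays in the
  // same state.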
1295 // Transitions out of E state
1296 transition(E, GETX, NO_B_W) {
1297 pfa_probeFilterAllocate;
1298 v_allocateTBE;
1299 rx_recordExclusiveInTBE;
1300 saa_setAcksToAllIfPF;
1301 qf_queueMemoryFetchRequest;
1302 fn_forwardRequestIfNecessary;
1303 i_popIncomingRequestQueue;
1304 }
1305
1306 transition(E, GETF, NO_F_W) {
1307 pfa_probeFilterAllocate;
1308 v_allocateTBE;
1309 rx_recordExclusiveInTBE;
1310 saa_setAcksToAllIfPF;
1311 qf_queueMemoryFetchRequest;
1312 fn_forwardRequestIfNecessary;
1313 i_popIncomingRequestQueue;
1314 }
1315
1316 transition(E, GETS, NO_B_W) {
1317 pfa_probeFilterAllocate;
1318 v_allocateTBE;
1319 rx_recordExclusiveInTBE;
1320 saa_setAcksToAllIfPF;
1321 qf_queueMemoryFetchRequest;
1322 fn_forwardRequestIfNecessary;
1323 i_popIncomingRequestQueue;
1324 }
1325
1326 transition(E, DMA_READ, NO_DR_B_W) {
1327 vd_allocateDmaRequestInTBE;
1328 qd_queueMemoryRequestFromDmaRead;
1329 spa_setPendingAcksToZeroIfPF;
1330 f_forwardReadFromDma;
1331 p_popDmaRequestQueue;
1332 }
1333
1334 transition(E, DMA_WRITE, NO_DW_B_W) {
1335 vd_allocateDmaRequestInTBE;
1336 spa_setPendingAcksToZeroIfPF;
1337 sc_signalCompletionIfPF;
1338 f_forwardWriteFromDma;
1339 p_popDmaRequestQueue;
1340 }
1341
1342 // Transitions out of O state
1343 transition(O, GETX, NO_B_W) {
1344 r_setMRU;
1345 v_allocateTBE;
1346 r_recordDataInTBE;
1347 sa_setAcksToOne;
1348 qf_queueMemoryFetchRequest;
1349 fb_forwardRequestBcast;
1350 cs_clearSharers;
1351 i_popIncomingRequestQueue;
1352 }
1353
1354 transition(O, GETF, NO_F_W) {
1355 r_setMRU;
1356 v_allocateTBE;
1357 r_recordDataInTBE;
1358 sa_setAcksToOne;
1359 qf_queueMemoryFetchRequest;
1360 fb_forwardRequestBcast;
1361 cs_clearSharers;
1362 i_popIncomingRequestQueue;
1363 }
1364
1365 // This transition is dumb: if a shared copy exists on-chip, then that copy
1366 // should provide the data, not slow off-chip dram. The problem is that the
1367 // current caches don't provide data in the S state.
1368 transition(O, GETS, O_B_W) {
1369 r_setMRU;
1370 v_allocateTBE;
1371 r_recordDataInTBE;
1372 saa_setAcksToAllIfPF;
1373 qf_queueMemoryFetchRequest;
1374 fn_forwardRequestIfNecessary;
1375 i_popIncomingRequestQueue;
1376 }
1377
1378 transition(O, DMA_READ, O_DR_B_W) {
1379 vd_allocateDmaRequestInTBE;
1380 spa_setPendingAcksToZeroIfPF;
1381 qd_queueMemoryRequestFromDmaRead;
1382 f_forwardReadFromDma;
1383 p_popDmaRequestQueue;
1384 }
1385
1386 transition(O, Pf_Replacement, O_R) {
1387 v_allocateTBE;
1388 pa_setPendingMsgsToAll;
1389 ia_invalidateAllRequest;
1390 pfd_probeFilterDeallocate;
1391 }
1392
1393 transition(S, Pf_Replacement, S_R) {
1394 v_allocateTBE;
1395 pa_setPendingMsgsToAll;
1396 ia_invalidateAllRequest;
1397 pfd_probeFilterDeallocate;
1398 }
1399
1400 transition(NO, Pf_Replacement, NO_R) {
1401 v_allocateTBE;
1402 po_setPendingMsgsToOne;
1403 io_invalidateOwnerRequest;
1404 pfd_probeFilterDeallocate;
1405 }
1406
1407 transition(NX, Pf_Replacement, NO_R) {
1408 v_allocateTBE;
1409 pa_setPendingMsgsToAll;
1410 ia_invalidateAllRequest;
1411 pfd_probeFilterDeallocate;
1412 }
1413
1414 transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
1415 vd_allocateDmaRequestInTBE;
1416 f_forwardWriteFromDma;
1417 p_popDmaRequestQueue;
1418 }
1419
1420 // Transitions out of NO state
1421 transition(NX, GETX, NO_B) {
1422 r_setMRU;
1423 fb_forwardRequestBcast;
1424 cs_clearSharers;
1425 i_popIncomingRequestQueue;
1426 }
1427
1428 transition(NX, GETF, NO_F) {
1429 r_setMRU;
1430 fb_forwardRequestBcast;
1431 cs_clearSharers;
1432 i_popIncomingRequestQueue;
1433 }
1434
1435 // Transitions out of NO state
1436 transition(NO, GETX, NO_B) {
1437 r_setMRU;
1438 ano_assertNotOwner;
1439 fc_forwardRequestConditionalOwner;
1440 cs_clearSharers;
1441 i_popIncomingRequestQueue;
1442 }
1443
1444 transition(NO, GETF, NO_F) {
1445 r_setMRU;
1446 //ano_assertNotOwner;
1447 nofc_forwardRequestConditionalOwner; // forward request if the requestor is not the owner
1448 cs_clearSharers;
1449 oc_sendBlockAck; // send block ack if the requestor is the owner
1450 i_popIncomingRequestQueue;
1451 }
1452
1453 transition(S, GETX, NO_B) {
1454 r_setMRU;
1455 fb_forwardRequestBcast;
1456 cs_clearSharers;
1457 i_popIncomingRequestQueue;
1458 }
1459
1460 transition(S, GETF, NO_F) {
1461 r_setMRU;
1462 fb_forwardRequestBcast;
1463 cs_clearSharers;
1464 i_popIncomingRequestQueue;
1465 }
1466
1467 transition(S, GETS, NO_B) {
1468 r_setMRU;
1469 ano_assertNotOwner;
1470 fb_forwardRequestBcast;
1471 i_popIncomingRequestQueue;
1472 }
1473
1474 transition(NO, GETS, NO_B) {
1475 r_setMRU;
1476 ano_assertNotOwner;
1477 ans_assertNotSharer;
1478 fc_forwardRequestConditionalOwner;
1479 i_popIncomingRequestQueue;
1480 }
1481
1482 transition(NX, GETS, NO_B) {
1483 r_setMRU;
1484 ano_assertNotOwner;
1485 fc_forwardRequestConditionalOwner;
1486 i_popIncomingRequestQueue;
1487 }
1488
1489 transition({NO, NX, S}, PUT, WB) {
1490 //
1491 // note that the PUT requestor may not be the current owner if an invalidate
1492 // raced with PUT
1493 //
1494 a_sendWriteBackAck;
1495 i_popIncomingRequestQueue;
1496 }
1497
1498 transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
1499 vd_allocateDmaRequestInTBE;
1500 f_forwardReadFromDma;
1501 p_popDmaRequestQueue;
1502 }
1503
1504 // Nack PUT requests when races cause us to believe we own the data
1505 transition({O, E}, PUT) {
1506 b_sendWriteBackNack;
1507 i_popIncomingRequestQueue;
1508 }
1509
1510 // Blocked transient states
1511 transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1512 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1513 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1514 {GETS, GETX, GETF, PUT, Pf_Replacement}) {
1515 z_stallAndWaitRequest;
1516 }
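  // Stalled requests are parked with stall_and_wait and re-scheduled by
  // k_wakeUpDependents (wakeUpBuffers) once the blocking transaction completes.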
1517
1518 transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}){
1519 z_stallAndWaitRequest;
1520 }
1521
1522 transition(NO_B, {GETX, GETF}, NO_B_X) {
1523 z_stallAndWaitRequest;
1524 }
1525
1526 transition(NO_B, {PUT, Pf_Replacement}) {
1527 z_stallAndWaitRequest;
1528 }
1529
1530 transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
1531 z_stallAndWaitRequest;
1532 }
1533
1534 transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
1535 NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
1536 NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
1537 {DMA_READ, DMA_WRITE}) {
1538 zd_stallAndWaitDMARequest;
1539 }
1540
1541 // merge GETS into one response
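  // While blocked in NO_B, additional GETS requests are recorded in the TBE
  // (GetSRequestors) and later forwarded to the owner as a single MERGED_GETS
  // (see fr_forwardMergeReadRequestsToOwner).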
1542 transition(NO_B, GETS, NO_B_S) {
1543 v_allocateTBE;
1544 rs_recordGetSRequestor;
1545 i_popIncomingRequestQueue;
1546 }
1547
1548 transition(NO_B_S, GETS) {
1549 rs_recordGetSRequestor;
1550 i_popIncomingRequestQueue;
1551 }
1552
1553 // unblock responses
1554 transition({NO_B, NO_B_X}, UnblockS, NX) {
1555 us_updateSharerIfFBD;
1556 k_wakeUpDependents;
1557 j_popIncomingUnblockQueue;
1558 }
1559
1560 transition({NO_B, NO_B_X}, UnblockM, NO) {
1561 uo_updateOwnerIfPf;
1562 us_updateSharerIfFBD;
1563 k_wakeUpDependents;
1564 j_popIncomingUnblockQueue;
1565 }
1566
1567 transition(NO_B_S, UnblockS, NO_B_S_W) {
1568 us_updateSharerIfFBD;
1569 fr_forwardMergeReadRequestsToOwner;
1570 sp_setPendingMsgsToMergedSharers;
1571 j_popIncomingUnblockQueue;
1572 }
1573
1574 transition(NO_B_S, UnblockM, NO_B_S_W) {
1575 uo_updateOwnerIfPf;
1576 fr_forwardMergeReadRequestsToOwner;
1577 sp_setPendingMsgsToMergedSharers;
1578 j_popIncomingUnblockQueue;
1579 }
1580
  transition(NO_B_S_W, UnblockS) {
    us_updateSharerIfFBD;
    mu_decrementNumberOfUnblocks;
    os_checkForMergedGetSCompletion;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_S_W, All_Unblocks, NX) {
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_B, UnblockS, O) {
    us_updateSharerIfFBD;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(O_B, UnblockM, NO) {
    us_updateSharerIfFBD;
    uo_updateOwnerIfPf;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

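  // Memory responses for outstanding reads: send the fetched data to the
  // original requestor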
  transition(NO_B_W, Memory_Data, NO_B) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(NO_F_W, Memory_Data, NO_F) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

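  // Memory responses for outstanding DMA reads: record the data in the TBE
  // while responses from the caches are still being collected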
  transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
    r_recordMemoryData;
    o_checkForCompletion;
    l_popMemQueue;
  }

  transition(O_DR_B_W, Memory_Data, O_DR_B) {
    r_recordMemoryData;
    dr_sendDmaData;
    o_checkForCompletion;
    l_popMemQueue;
  }

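  // Collect acks from the caches for outstanding DMA requests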
  transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

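  // Probe filter replacement in progress (O_R/S_R/NO_R): collect acks and any
  // written-back data from the caches being invalidated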
  transition({O_R, S_R, NO_R}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(S_R, Data) {
    wr_writeResponseDataToMemory;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_R, {Data, Exclusive_Data}) {
    wr_writeResponseDataToMemory;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition({NO_DR_B_W, O_DR_B_W}, Ack) {
    m_decrementNumberOfMessages;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
  }

  transition(O_DR_B, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(O_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

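  // Cache data responses for outstanding DMA reads: record the data in the
  // TBE (Shared_Data also updates the owner bit via so_setOwnerBit)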
  transition(NO_DR_B_W, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

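  //
  // DMA read completions: once all acks and data have arrived, reply to the
  // DMA device from the TBE (dt_sendDmaDataFromTbe) and write back the
  // collected data (wdt_writeDataFromTBE).
  //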
  transition(NO_DR_B, All_acks_and_owner_data, O) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B, All_acks_and_shared_data, S) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_owner_data, O) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_shared_data, S) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_DR_B, All_acks_and_owner_data, O) {
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_DR_B, All_acks_and_data_no_sharers, E) {
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
    a_assertCacheData;
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

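  //
  // DMA write completions: once all acks have arrived, merge the DMA data into
  // the TBE and issue the memory write; the DMA device is acked only after
  // memory acknowledges the write.
  //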
  transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWrite;
    g_popTriggerQueue;
  }

  transition(NO_DW_W, Memory_Ack, E) {
    da_sendDmaAck;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(O_B_W, Memory_Data, O_B) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

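  // Unblocks that arrive while the memory fetch is still outstanding: record
  // the new owner/sharer and keep waiting for the memory response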
  transition(NO_B_W, UnblockM, NO_W) {
    uo_updateOwnerIfPf;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_W, UnblockS, NO_W) {
    us_updateSharerIfFBD;
    j_popIncomingUnblockQueue;
  }

  transition(O_B_W, UnblockS, O_W) {
    us_updateSharerIfFBD;
    j_popIncomingUnblockQueue;
  }

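  // The requestor has already unblocked (it was satisfied by another cache),
  // so the trailing memory response is dropped and the TBE is freed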
  transition(NO_W, Memory_Data, NO) {
    w_deallocateTBE;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(O_W, Memory_Data, O) {
    w_deallocateTBE;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  // WB state transitions: handle the owner's writeback data/unblock after its
  // PUT was acked
  transition(WB, Writeback_Dirty, WB_O_W) {
    rs_removeSharer;
    l_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
    rs_removeSharer;
    l_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(WB_E_W, Memory_Ack, E) {
    l_writeDataToMemory;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(WB_O_W, Memory_Ack, O) {
    l_writeDataToMemory;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(WB, Writeback_Clean, O) {
    ll_checkIncomingWriteback;
    rs_removeSharer;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Writeback_Exclusive_Clean, E) {
    ll_checkIncomingWriteback;
    rs_removeSharer;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Unblock, NX) {
    auno_assertUnblockerNotOwner;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

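  // PUTF handling (GETF/PUTF appear to support flush requests): the
  // requestor's PUTF is acked like a normal writeback, so the block moves to
  // WB to await the writeback data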
  transition(NO_F, PUTF, WB) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // Possible race between GETF and UnblockM -- it is unclear whether this
  // transition is still needed
  transition(NO_F, UnblockM) {
    us_updateSharerIfFBD;
    uo_updateOwnerIfPf;
    j_popIncomingUnblockQueue;
  }
}