Ruby: Add support for functional accesses
gem5.git / src/mem/protocol/MOESI_hammer-dir.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(Directory, "AMD Hammer-like protocol")
 : DirectoryMemory * directory,
   CacheMemory * probeFilter,
   MemoryControl * memBuffer,
   int memory_controller_latency = 2,
   bool probe_filter_enabled = false,
   bool full_bit_dir_enabled = false
{

  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false", vnet_type="response";
  //
  // For a finite buffered network, note that the DMA response network only
  // works at this relatively lower numbered (lower priority) virtual network
  // because the trigger queue decouples cache responses from DMA responses.
  //
  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true", vnet_type="response";

  MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false", vnet_type="unblock";
  MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", vnet_type="request", recycle_latency="1";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true", vnet_type="request";
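
  //
  // Note that requestToDir is the only buffer declared with a recycle_latency;
  // the requestQueue_out port below exists so that CPU requests that cannot be
  // serviced immediately can be recycled back onto this queue.
  //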

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_E") {
    // Base states
    NX, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in O at Owner";
    NO, AccessPermission:Maybe_Stale, desc="Not Owner, probe filter entry exists, block in E/M at Owner";
    S, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists pointing to the current owner";
    O, AccessPermission:Read_Only, desc="Data clean, probe filter entry exists";
    E, AccessPermission:Read_Write, desc="Exclusive Owner, no probe filter entry";

    O_R, AccessPermission:Read_Only, desc="Was data Owner, replacing probe filter entry";
    S_R, AccessPermission:Read_Only, desc="Was Not Owner or Sharer, replacing probe filter entry";
    NO_R, AccessPermission:Busy, desc="Was Not Owner or Sharer, replacing probe filter entry";

    NO_B, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked";
    NO_B_X, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETX";
    NO_B_S, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, next queued request GETS";
    NO_B_S_W, AccessPermission:Busy, "NO^B", desc="Not Owner, Blocked, forwarded merged GETS, waiting for responses";
    O_B, AccessPermission:Busy, "O^B", desc="Owner, Blocked";
    NO_B_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
    O_B_W, AccessPermission:Busy, desc="Owner, Blocked, waiting for Dram";
    NO_W, AccessPermission:Busy, desc="Not Owner, waiting for Dram";
    O_W, AccessPermission:Busy, desc="Owner, waiting for Dram";
    NO_DW_B_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram and cache responses";
    NO_DR_B_W, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for Dram and cache responses";
    NO_DR_B_D, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses including dirty data";
    NO_DR_B, AccessPermission:Busy, desc="Not Owner, Dma Read waiting for cache responses";
    NO_DW_W, AccessPermission:Busy, desc="Not Owner, Dma Write waiting for Dram";
    O_DR_B_W, AccessPermission:Busy, desc="Owner, Dma Read waiting for Dram and cache responses";
    O_DR_B, AccessPermission:Busy, desc="Owner, Dma Read waiting for cache responses";
    WB, AccessPermission:Busy, desc="Blocked on a writeback";
    WB_O_W, AccessPermission:Busy, desc="Blocked on memory write, will go to O";
    WB_E_W, AccessPermission:Busy, desc="Blocked on memory write, will go to E";

    NO_F, AccessPermission:Busy, desc="Blocked on a flush";
    NO_F_W, AccessPermission:Busy, desc="Not Owner, Blocked, waiting for Dram";
  }
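
  //
  // The directory defaults to state E (see default above): until a cache
  // requests the block, no probe filter entry exists and the directory
  // itself holds the only (exclusive) copy.
  //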

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUT, desc="A PUT arrives";
    Unblock, desc="An unblock message arrives";
    UnblockS, desc="An unblockS message arrives";
    UnblockM, desc="An unblockM message arrives";
    Writeback_Clean, desc="The final part of a PutX (no data)";
    Writeback_Dirty, desc="The final part of a PutX (data)";
    Writeback_Exclusive_Clean, desc="The final part of a PutX (no data, exclusive)";
    Writeback_Exclusive_Dirty, desc="The final part of a PutX (data, exclusive)";

    // Probe filter
    Pf_Replacement, desc="probe filter replacement";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";

    // Cache responses required to handle DMA
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Data, desc="Received a data message, responder had an owner or exclusive copy and gave it to us";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy and gave it to us";

    // Triggers
    All_acks_and_shared_data, desc="Received shared data and message acks";
    All_acks_and_owner_data, desc="Received owner data and message acks";
    All_acks_and_data_no_sharers, desc="Received all acks and no other processor has a shared copy";
    All_Unblocks, desc="Received all unblocks for a merged gets request";
    GETF, desc="A GETF arrives";
    PUTF, desc="A PUTF arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
  }

  // ProbeFilterEntry
  structure(PfEntry, desc="...", interface="AbstractCacheEntry") {
    State PfState, desc="Directory state";
    MachineID Owner, desc="Owner node";
    DataBlock DataBlk, desc="data for the block";
    Set Sharers, desc="sharing vector for full bit directory";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    CoherenceResponseType ResponseType, desc="The type for the subsequent response message";
    int Acks, default="0", desc="The number of acks that the waiting response represents";
    int SilentAcks, default="0", desc="The number of silent acks associated with this transaction";
    DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to be merged with system memory";
    DataBlock DataBlk, desc="The current view of system memory";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
    NetDest GetSRequestors, desc="GETS merged requestors";
    int NumPendingMsgs, desc="Number of pending acks/messages";
    bool CacheDirty, default="false", desc="Indicates whether a cache has responded with dirty data";
    bool Sharers, default="false", desc="Indicates whether a cache has indicated it is currently a sharer";
    bool Owned, default="false", desc="Indicates whether a cache has indicated it is currently the owner";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Address a);

  // ** OBJECTS **

  Set fwd_set;

  TBETable TBEs, template_hack="<Directory_TBE>";

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  PfEntry getProbeFilterEntry(Address addr), return_by_pointer="yes" {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      PfEntry pfEntry := static_cast(PfEntry, "pointer", probeFilter.lookup(addr));
      return pfEntry;
    }
    return OOD;
  }
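
  //
  // When neither the probe filter nor the full-bit directory is enabled,
  // getProbeFilterEntry returns OOD (an invalid entry), so callers must
  // guard any dereference of the returned entry with is_valid().
  //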

  State getState(TBE tbe, PfEntry pf_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else {
      if (probe_filter_enabled || full_bit_dir_enabled) {
        if (is_valid(pf_entry)) {
          assert(pf_entry.PfState == getDirectoryEntry(addr).DirectoryState);
        }
      }
      return getDirectoryEntry(addr).DirectoryState;
    }
  }

  void setState(TBE tbe, PfEntry pf_entry, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    if (probe_filter_enabled || full_bit_dir_enabled) {
      if (is_valid(pf_entry)) {
        pf_entry.PfState := state;
      }
      if (state == State:NX || state == State:NO || state == State:S || state == State:O) {
        assert(is_valid(pf_entry));
      }
      if (state == State:E) {
        assert(is_valid(pf_entry) == false);
      }
    }
    if (state == State:E || state == State:NX || state == State:NO || state == State:S ||
        state == State:O) {
      assert(is_valid(tbe) == false);
    }
    getDirectoryEntry(addr).DirectoryState := state;
  }
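
  //
  // Note the invariants enforced by getState/setState above: whenever a probe
  // filter entry exists its PfState mirrors the directory state, the stable
  // sharing states (NX/NO/S/O) require an entry, E requires that none exist,
  // and no TBE may be live while the block sits in any stable state.
  //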

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(PfEntry pf_entry, Address addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  }

  Event cache_request_to_event(CoherenceRequestType type) {
    if (type == CoherenceRequestType:GETS) {
      return Event:GETS;
    } else if (type == CoherenceRequestType:GETX) {
      return Event:GETX;
    } else if (type == CoherenceRequestType:GETF) {
      return Event:GETF;
    } else {
      error("Invalid CoherenceRequestType");
    }
  }

  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  //
  // Memory buffer for memory controller to DIMM communication
  //
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks_and_owner_data, in_msg.Address,
                  pf_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_OWNER_EXISTS) {
          trigger(Event:All_acks_and_shared_data, in_msg.Address,
                  pf_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_and_data_no_sharers, in_msg.Address,
                  pf_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_UNBLOCKS) {
          trigger(Event:All_Unblocks, in_msg.Address,
                  pf_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          trigger(Event:Unblock, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
          trigger(Event:UnblockS, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCKM) {
          trigger(Event:UnblockM, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:WB_CLEAN) {
          trigger(Event:Writeback_Clean, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:WB_DIRTY) {
          trigger(Event:Writeback_Dirty, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_CLEAN) {
          trigger(Event:Writeback_Exclusive_Clean, in_msg.Address,
                  pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:WB_EXCLUSIVE_DIRTY) {
          trigger(Event:Writeback_Exclusive_Dirty, in_msg.Address,
                  pf_entry, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
    if (responseToDir_in.isReady()) {
      peek(responseToDir_in, ResponseMsg) {
        PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, pf_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, pf_entry, tbe);
        } else {
          DPRINTF(RubySlicc, "%d\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        PfEntry pf_entry := getProbeFilterEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == CoherenceRequestType:PUT) {
          trigger(Event:PUT, in_msg.Address, pf_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTF) {
          trigger(Event:PUTF, in_msg.Address, pf_entry, tbe);
        } else {
          if (probe_filter_enabled || full_bit_dir_enabled) {
            if (is_valid(pf_entry)) {
              trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
                      pf_entry, tbe);
            } else {
              if (probeFilter.cacheAvail(in_msg.Address)) {
                trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
                        pf_entry, tbe);
              } else {
                trigger(Event:Pf_Replacement,
                        probeFilter.cacheProbe(in_msg.Address),
                        getProbeFilterEntry(probeFilter.cacheProbe(in_msg.Address)),
                        TBEs[probeFilter.cacheProbe(in_msg.Address)]);
              }
            }
          } else {
            trigger(cache_request_to_event(in_msg.Type), in_msg.Address,
                    pf_entry, tbe);
          }
        }
      }
    }
  }

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, pf_entry, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }
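
  //
  // The rank values above order in_port service (highest rank first):
  // internally generated triggers (rank=5) and unblocks are handled before
  // cache responses, memory responses, CPU requests, and finally DMA
  // requests (rank=0).
  //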

  // Actions

  action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      assert(is_valid(cache_entry));
      probeFilter.setMRU(address);
    }
  }

  action(auno_assertUnblockerNotOwner, "auno", desc="assert unblocker not owner") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      assert(is_valid(cache_entry));
      peek(unblockNetwork_in, ResponseMsg) {
        assert(cache_entry.Owner != in_msg.Sender);
        if (full_bit_dir_enabled) {
          assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)) == false);
        }
      }
    }
  }

  action(uo_updateOwnerIfPf, "uo", desc="update owner") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      assert(is_valid(cache_entry));
      peek(unblockNetwork_in, ResponseMsg) {
        cache_entry.Owner := in_msg.Sender;
        if (full_bit_dir_enabled) {
          cache_entry.Sharers.clear();
          cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
          APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
          DPRINTF(RubySlicc, "Sharers = %d\n", cache_entry.Sharers);
        }
      }
    }
  }

  action(us_updateSharerIfFBD, "us", desc="update sharer if full-bit directory") {
    if (full_bit_dir_enabled) {
      assert(probeFilter.isTagPresent(address));
      peek(unblockNetwork_in, ResponseMsg) {
        cache_entry.Sharers.add(machineIDToNodeID(in_msg.Sender));
      }
    }
  }

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
    peek(requestQueue_in, RequestMsg) {
      if ((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:BLOCK_ACK;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Writeback_Control;
        }
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(pfa_probeFilterAllocate, "pfa", desc="Allocate ProbeFilterEntry") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        set_cache_entry(probeFilter.allocate(address, new PfEntry));
        cache_entry.Owner := in_msg.Requestor;
        cache_entry.Sharers.setSize(machineCount(MachineType:L1Cache));
      }
    }
  }

  action(pfd_probeFilterDeallocate, "pfd", desc="Deallocate ProbeFilterEntry") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      probeFilter.deallocate(address);
      unset_cache_entry();
    }
  }

  action(ppfd_possibleProbeFilterDeallocate, "ppfd", desc="Deallocate ProbeFilterEntry") {
    if ((probe_filter_enabled || full_bit_dir_enabled) && is_valid(cache_entry)) {
      probeFilter.deallocate(address);
      unset_cache_entry();
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := address;
      tbe.ResponseType := CoherenceResponseType:NULL;
    }
  }

  action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaDataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
      tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
      //
      // One ack for each last-level cache
      //
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
      //
      // Assume initially that the caches store a clean copy and that memory
      // will provide the data
      //
      tbe.CacheDirty := false;
    }
  }

  action(pa_setPendingMsgsToAll, "pa", desc="set pending msgs to all") {
    assert(is_valid(tbe));
    if (full_bit_dir_enabled) {
      assert(is_valid(cache_entry));
      tbe.NumPendingMsgs := cache_entry.Sharers.count();
    } else {
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(po_setPendingMsgsToOne, "po", desc="set pending msgs to one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := 1;
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(sa_setAcksToOne, "sa", desc="Forwarded request, set the ack amount to one") {
    assert(is_valid(tbe));
    peek(requestQueue_in, RequestMsg) {
      if (full_bit_dir_enabled) {
        assert(is_valid(cache_entry));
        //
        // If we are using the full-bit directory and no sharers exist beyond
        // the requestor, then we must set the ack number to all, not one.
        //
        fwd_set := cache_entry.Sharers;
        fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
        if (fwd_set.count() > 0) {
          tbe.Acks := 1;
          tbe.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
          tbe.SilentAcks := tbe.SilentAcks - 1;
        } else {
          tbe.Acks := machineCount(MachineType:L1Cache);
          tbe.SilentAcks := 0;
        }
      } else {
        tbe.Acks := 1;
      }
    }
  }
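
  //
  // A worked example of the accounting above (hypothetical numbers): with
  // four L1 caches and Sharers = {requestor, one other node}, fwd_set ends
  // up with a single member, so Acks := 1 and SilentAcks := 4 - 1 - 1 = 2,
  // i.e. one real ack plus two silent acks for the caches never probed.
  //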

  action(saa_setAcksToAllIfPF, "saa", desc="Non-forwarded request, set the ack amount to all") {
    assert(is_valid(tbe));
    if (probe_filter_enabled || full_bit_dir_enabled) {
      tbe.Acks := machineCount(MachineType:L1Cache);
      tbe.SilentAcks := 0;
    } else {
      tbe.Acks := 1;
    }
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToDir_in, ResponseMsg) {
      assert(is_valid(tbe));
      assert(in_msg.Acks > 0);
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      //
      // Note that cache data responses will have an ack count of 2. However,
      // directory DMA requests must wait for acks from all LLC caches, so
      // only decrement by 1.
      //
      if ((in_msg.Type == CoherenceResponseType:DATA_SHARED) ||
          (in_msg.Type == CoherenceResponseType:DATA) ||
          (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE)) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
      } else {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
    }
  }
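
  //
  // For example, a DATA_SHARED response that carries Acks == 2 still
  // decrements NumPendingMsgs by exactly one here, because a DMA transaction
  // must collect a response from every L1 cache individually.
  //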

  action(mu_decrementNumberOfUnblocks, "mu", desc="Decrement the number of messages for which we're waiting") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(is_valid(tbe));
      assert(in_msg.Type == CoherenceResponseType:UNBLOCKS);
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToDir_in.dequeue();
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          if (tbe.Owned) {
            out_msg.Type := TriggerType:ALL_ACKS_OWNER_EXISTS;
          } else {
            out_msg.Type := TriggerType:ALL_ACKS;
          }
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(os_checkForMergedGetSCompletion, "os", desc="Check for merged GETS completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_UNBLOCKS;
      }
    }
  }

  action(sp_setPendingMsgsToMergedSharers, "sp", desc="Set pending messages to waiting sharers") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.GetSRequestors.count();
  }

  action(spa_setPendingAcksToZeroIfPF, "spa", desc="if probe filter, no need to wait for acks") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      assert(is_valid(tbe));
      tbe.NumPendingMsgs := 0;
    }
  }

  action(sc_signalCompletionIfPF, "sc", desc="indicate that we should skip waiting for cpu acks") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      assert(probe_filter_enabled || full_bit_dir_enabled);
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := tbe.ResponseType;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := tbe.Acks;
        out_msg.SilentAcks := tbe.SilentAcks;
        DPRINTF(RubySlicc, "%d\n", out_msg.Acks);
        assert(out_msg.Acks > 0);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDmaData, "dr", desc="Send Data to DMA controller from memory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dt_sendDmaDataFromTbe, "dt", desc="Send Data to DMA controller from tbe") {
    peek(triggerQueue_in, TriggerMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        //
        // we send the entire data block and rely on the dma controller to
        // split it up if need be
        //
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDmaAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(rx_recordExclusiveInTBE, "rx", desc="Record Exclusive in TBE") {
    peek(requestQueue_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
    }
  }

  action(r_recordDataInTBE, "rt", desc="Record Data in TBE") {
    peek(requestQueue_in, RequestMsg) {
      assert(is_valid(tbe));
      if (full_bit_dir_enabled) {
        fwd_set := cache_entry.Sharers;
        fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
        if (fwd_set.count() > 0) {
          tbe.ResponseType := CoherenceResponseType:DATA;
        } else {
          tbe.ResponseType := CoherenceResponseType:DATA_EXCLUSIVE;
        }
      } else {
        tbe.ResponseType := CoherenceResponseType:DATA;
      }
    }
  }

  action(rs_recordGetSRequestor, "rs", desc="Record GETS requestor in TBE") {
    peek(requestQueue_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.GetSRequestors.add(in_msg.Requestor);
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(so_setOwnerBit, "so", desc="We saw the owner") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
    tbe.Owned := true;
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(fn_forwardRequestIfNecessary, "fn", desc="Forward requests if necessary") {
    assert(is_valid(tbe));
    if ((machineCount(MachineType:L1Cache) > 1) && (tbe.Acks <= 1)) {
      if (full_bit_dir_enabled) {
        assert(is_valid(cache_entry));
        peek(requestQueue_in, RequestMsg) {
          fwd_set := cache_entry.Sharers;
          fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
          if (fwd_set.count() > 0) {
            enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
              out_msg.Address := address;
              out_msg.Type := in_msg.Type;
              out_msg.Requestor := in_msg.Requestor;
              out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
              out_msg.MessageSize := MessageSizeType:Multicast_Control;
              out_msg.InitialRequestTime := in_msg.InitialRequestTime;
              out_msg.ForwardRequestTime := get_time();
              assert(tbe.SilentAcks > 0);
              out_msg.SilentAcks := tbe.SilentAcks;
            }
          }
        }
      } else {
        peek(requestQueue_in, RequestMsg) {
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            out_msg.Address := address;
            out_msg.Type := in_msg.Type;
            out_msg.Requestor := in_msg.Requestor;
            out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
            out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
            out_msg.MessageSize := MessageSizeType:Broadcast_Control;
            out_msg.InitialRequestTime := in_msg.InitialRequestTime;
            out_msg.ForwardRequestTime := get_time();
          }
        }
      }
    }
  }

  action(ia_invalidateAllRequest, "ia", desc="invalidate all copies") {
    if (machineCount(MachineType:L1Cache) > 1) {
      if (full_bit_dir_enabled) {
        assert(cache_entry.Sharers.count() > 0);
        peek(requestQueue_in, RequestMsg) {
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            out_msg.Address := address;
            out_msg.Type := CoherenceRequestType:INV;
            out_msg.Requestor := machineID;
            out_msg.Destination.setNetDest(MachineType:L1Cache, cache_entry.Sharers);
            out_msg.MessageSize := MessageSizeType:Multicast_Control;
          }
        }
      } else {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        }
      }
    }
  }

  action(io_invalidateOwnerRequest, "io", desc="invalidate the owner's copy") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(cache_entry.Owner);
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.DirectedProbe := true;
      }
    }
  }

  action(fb_forwardRequestBcast, "fb", desc="Forward requests to all nodes") {
    if (machineCount(MachineType:L1Cache) > 1) {
      peek(requestQueue_in, RequestMsg) {
        if (full_bit_dir_enabled) {
          fwd_set := cache_entry.Sharers;
          fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
          if (fwd_set.count() > 0) {
            enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
              out_msg.Address := address;
              out_msg.Type := in_msg.Type;
              out_msg.Requestor := in_msg.Requestor;
              out_msg.Destination.setNetDest(MachineType:L1Cache, fwd_set);
              out_msg.MessageSize := MessageSizeType:Multicast_Control;
              out_msg.InitialRequestTime := in_msg.InitialRequestTime;
              out_msg.ForwardRequestTime := get_time();
              out_msg.SilentAcks := machineCount(MachineType:L1Cache) - fwd_set.count();
              out_msg.SilentAcks := out_msg.SilentAcks - 1;
            }
          }
        } else {
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            out_msg.Address := address;
            out_msg.Type := in_msg.Type;
            out_msg.Requestor := in_msg.Requestor;
            out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
            out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
            out_msg.MessageSize := MessageSizeType:Broadcast_Control;
            out_msg.InitialRequestTime := in_msg.InitialRequestTime;
            out_msg.ForwardRequestTime := get_time();
          }
        }
      }
    }
  }
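
  //
  // SilentAcks above counts the caches that the full-bit directory elects
  // not to probe; carrying it with the forwarded request lets the eventual
  // responses account for caches that never see the request, so the
  // requestor does not wait for acks that will never arrive.
  //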

  action(fr_forwardMergeReadRequestsToOwner, "frr", desc="Forward coalesced read request to owner") {
    assert(machineCount(MachineType:L1Cache) > 1);
    //
    // Fixme! The unblock network should not stall on the forward network. Add a trigger queue to
    // decouple the two.
    //
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:MERGED_GETS;
        out_msg.MergedRequestors := tbe.GetSRequestors;
        if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
          out_msg.Destination.add(in_msg.CurOwner);
        } else {
          out_msg.Destination.add(in_msg.Sender);
        }
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := zero_time();
        out_msg.ForwardRequestTime := get_time();
      }
    }
  }

  action(fc_forwardRequestConditionalOwner, "fc", desc="Forward request to one or more nodes") {
    assert(machineCount(MachineType:L1Cache) > 1);
    if (probe_filter_enabled || full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          assert(is_valid(cache_entry));
          out_msg.Address := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.Destination.add(cache_entry.Owner);
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.DirectedProbe := true;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := get_time();
        }
      }
    } else {
      peek(requestQueue_in, RequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
          out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := get_time();
        }
      }
    }
  }

  action(nofc_forwardRequestConditionalOwner, "nofc", desc="Forward request to one or more nodes if the requestor is not the owner") {
    assert(machineCount(MachineType:L1Cache) > 1);

    if (probe_filter_enabled || full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Requestor != cache_entry.Owner) {
          enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
            assert(is_valid(cache_entry));
            out_msg.Address := address;
            out_msg.Type := in_msg.Type;
            out_msg.Requestor := in_msg.Requestor;
            out_msg.Destination.add(cache_entry.Owner);
            out_msg.MessageSize := MessageSizeType:Request_Control;
            out_msg.DirectedProbe := true;
            out_msg.InitialRequestTime := in_msg.InitialRequestTime;
            out_msg.ForwardRequestTime := get_time();
          }
        }
      }
    } else {
      peek(requestQueue_in, RequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := in_msg.Type;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.Destination.broadcast(MachineType:L1Cache); // Send to all L1 caches
          out_msg.Destination.remove(in_msg.Requestor); // Don't include the original requestor
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := get_time();
        }
      }
    }
  }

  action(f_forwardWriteFromDma, "fw", desc="Forward requests") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs > 0) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:GETX;
          //
          // Send to all L1 caches, since the requestor is the memory controller
          // itself
          //
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        }
      }
    }
  }

  action(f_forwardReadFromDma, "fr", desc="Forward requests") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs > 0) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        enqueue(forwardNetwork_out, RequestMsg, latency=memory_controller_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:GETS;
          //
          // Send to all L1 caches, since the requestor is the memory controller
          // itself
          //
          out_msg.Requestor := machineID;
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
        }
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    peek(unblockNetwork_in, ResponseMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
    }
    unblockNetwork_in.dequeue();
  }

  action(k_wakeUpDependents, "k", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
    triggerQueue_in.dequeue();
  }

  action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
    stall_and_wait(dmaRequestQueue_in, address);
  }

  action(r_recordMemoryData, "rd", desc="record data from memory to TBE") {
    peek(memQueue_in, MemoryMsg) {
      assert(is_valid(tbe));
      if (tbe.CacheDirty == false) {
        tbe.DataBlk := in_msg.DataBlk;
      }
    }
  }

  action(r_recordCacheData, "rc", desc="record data from cache response to TBE") {
    peek(responseToDir_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CacheDirty := true;
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(wr_writeResponseDataToMemory, "wr", desc="Write response data to memory") {
    peek(responseToDir_in, ResponseMsg) {
      getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
  }
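
  //
  // copyPartial above overlays only Len bytes of the DMA write, starting at
  // the offset of tbe.PhysicalAddress within the line, onto the full data
  // block; this is why DataBlk is first seeded with the current view of
  // memory before the partial write is merged in.
  //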

  action(wdt_writeDataFromTBE, "wdt", desc="Write data to memory from TBE") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
    getDirectoryEntry(address).DataBlk := tbe.DataBlk;
    DPRINTF(RubySlicc, "%s\n", getDirectoryEntry(address).DataBlk);
  }

  action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
    assert(is_valid(tbe));
    assert(tbe.CacheDirty);
  }

  action(ano_assertNotOwner, "ano", desc="Assert that requestor is not the current owner") {
    if (probe_filter_enabled || full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        assert(is_valid(cache_entry));
        assert(cache_entry.Owner != in_msg.Requestor);
      }
    }
  }

  action(ans_assertNotSharer, "ans", desc="Assert that requestor is not a current sharer") {
    if (full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Requestor)) == false);
      }
    }
  }

  action(rs_removeSharer, "s", desc="remove current sharer") {
    if (full_bit_dir_enabled) {
      peek(unblockNetwork_in, ResponseMsg) {
        assert(cache_entry.Sharers.isElement(machineIDToNodeID(in_msg.Sender)));
        cache_entry.Sharers.remove(machineIDToNodeID(in_msg.Sender));
      }
    }
  }

  action(cs_clearSharers, "cs", desc="clear current sharers") {
    if (full_bit_dir_enabled) {
      peek(requestQueue_in, RequestMsg) {
        cache_entry.Sharers.clear();
        cache_entry.Sharers.add(machineIDToNodeID(in_msg.Requestor));
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      // first, initialize the data blk to the current version of system memory
      out_msg.DataBlk := tbe.DataBlk;
      // then add the dma write data
      out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
    }
  }

  action(z_stallAndWaitRequest, "z", desc="Stall and wait the request queue") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
    stall_and_wait(requestQueue_in, address);
  }

  // TRANSITIONS

  // Transitions out of E state
  transition(E, GETX, NO_B_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(E, GETF, NO_F_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(E, GETS, NO_B_W) {
    pfa_probeFilterAllocate;
    v_allocateTBE;
    rx_recordExclusiveInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(E, DMA_READ, NO_DR_B_W) {
    vd_allocateDmaRequestInTBE;
    qd_queueMemoryRequestFromDmaRead;
    spa_setPendingAcksToZeroIfPF;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
  }

  transition(E, DMA_WRITE, NO_DW_B_W) {
    vd_allocateDmaRequestInTBE;
    spa_setPendingAcksToZeroIfPF;
    sc_signalCompletionIfPF;
    f_forwardWriteFromDma;
    p_popDmaRequestQueue;
  }

  // Transitions out of O state
  transition(O, GETX, NO_B_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    sa_setAcksToOne;
    qf_queueMemoryFetchRequest;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(O, GETF, NO_F_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    sa_setAcksToOne;
    qf_queueMemoryFetchRequest;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  // This transition is dumb: if a shared copy exists on-chip, that copy
  // should provide the data rather than slow off-chip dram. The problem is
  // that the current caches don't provide data in the S state.
  transition(O, GETS, O_B_W) {
    r_setMRU;
    v_allocateTBE;
    r_recordDataInTBE;
    saa_setAcksToAllIfPF;
    qf_queueMemoryFetchRequest;
    fn_forwardRequestIfNecessary;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O_DR_B_W) {
    vd_allocateDmaRequestInTBE;
    spa_setPendingAcksToZeroIfPF;
    qd_queueMemoryRequestFromDmaRead;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
  }

  transition(O, Pf_Replacement, O_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
  }

  transition(S, Pf_Replacement, S_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
  }

  transition(NO, Pf_Replacement, NO_R) {
    v_allocateTBE;
    po_setPendingMsgsToOne;
    io_invalidateOwnerRequest;
    pfd_probeFilterDeallocate;
  }

  transition(NX, Pf_Replacement, NO_R) {
    v_allocateTBE;
    pa_setPendingMsgsToAll;
    ia_invalidateAllRequest;
    pfd_probeFilterDeallocate;
  }

  transition({O, S, NO, NX}, DMA_WRITE, NO_DW_B_W) {
    vd_allocateDmaRequestInTBE;
    f_forwardWriteFromDma;
    p_popDmaRequestQueue;
  }

  // Transitions out of NX state
  transition(NX, GETX, NO_B) {
    r_setMRU;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(NX, GETF, NO_F) {
    r_setMRU;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  // Transitions out of NO state
  transition(NO, GETX, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    fc_forwardRequestConditionalOwner;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(NO, GETF, NO_F) {
    r_setMRU;
    //ano_assertNotOwner;
    nofc_forwardRequestConditionalOwner; // forward request if the requestor is not the owner
    cs_clearSharers;
    oc_sendBlockAck; // send ack if the owner
    i_popIncomingRequestQueue;
  }

  transition(S, GETX, NO_B) {
    r_setMRU;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(S, GETF, NO_F) {
    r_setMRU;
    fb_forwardRequestBcast;
    cs_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(S, GETS, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    fb_forwardRequestBcast;
    i_popIncomingRequestQueue;
  }

  transition(NO, GETS, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    ans_assertNotSharer;
    fc_forwardRequestConditionalOwner;
    i_popIncomingRequestQueue;
  }

  transition(NX, GETS, NO_B) {
    r_setMRU;
    ano_assertNotOwner;
    fc_forwardRequestConditionalOwner;
    i_popIncomingRequestQueue;
  }

  transition({NO, NX, S}, PUT, WB) {
    //
    // note that the PUT requestor may not be the current owner if an invalidate
    // raced with PUT
    //
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition({NO, NX, S}, DMA_READ, NO_DR_B_D) {
    vd_allocateDmaRequestInTBE;
    f_forwardReadFromDma;
    p_popDmaRequestQueue;
  }

  // Nack PUT requests when races cause us to believe we own the data
  transition({O, E}, PUT) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  // Blocked transient states
  transition({NO_B_X, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
              NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
              NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
             {GETS, GETX, GETF, PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition(NO_F, {GETS, GETX, GETF, PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition(NO_B, {GETX, GETF}, NO_B_X) {
    z_stallAndWaitRequest;
  }

  transition(NO_B, {PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition(NO_B_S, {GETX, GETF, PUT, Pf_Replacement}) {
    z_stallAndWaitRequest;
  }

  transition({NO_B_X, NO_B, NO_B_S, O_B, NO_DR_B_W, NO_DW_B_W, NO_B_W, NO_DR_B_D,
              NO_DR_B, O_DR_B, O_B_W, O_DR_B_W, NO_DW_W, NO_B_S_W,
              NO_W, O_W, WB, WB_E_W, WB_O_W, O_R, S_R, NO_R, NO_F_W},
             {DMA_READ, DMA_WRITE}) {
    zd_stallAndWaitDMARequest;
  }
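
  //
  // The stall-and-wait transitions above park messages on a per-address wait
  // list (stall_and_wait); the k_wakeUpDependents action (wakeUpBuffers)
  // releases them once the block leaves its transient state.
  //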
1502
1503 // merge GETS into one response
1504 transition(NO_B, GETS, NO_B_S) {
1505 v_allocateTBE;
1506 rs_recordGetSRequestor;
1507 i_popIncomingRequestQueue;
1508 }
1509
1510 transition(NO_B_S, GETS) {
1511 rs_recordGetSRequestor;
1512 i_popIncomingRequestQueue;
1513 }
1514
1515 // unblock responses
1516 transition({NO_B, NO_B_X}, UnblockS, NX) {
1517 us_updateSharerIfFBD;
1518 k_wakeUpDependents;
1519 j_popIncomingUnblockQueue;
1520 }
1521
1522 transition({NO_B, NO_B_X}, UnblockM, NO) {
1523 uo_updateOwnerIfPf;
1524 us_updateSharerIfFBD;
1525 k_wakeUpDependents;
1526 j_popIncomingUnblockQueue;
1527 }
1528
1529 transition(NO_B_S, UnblockS, NO_B_S_W) {
1530 us_updateSharerIfFBD;
1531 fr_forwardMergeReadRequestsToOwner;
1532 sp_setPendingMsgsToMergedSharers;
1533 j_popIncomingUnblockQueue;
1534 }
1535
1536 transition(NO_B_S, UnblockM, NO_B_S_W) {
1537 uo_updateOwnerIfPf;
1538 fr_forwardMergeReadRequestsToOwner;
1539 sp_setPendingMsgsToMergedSharers;
1540 j_popIncomingUnblockQueue;
1541 }
1542
1543 transition(NO_B_S_W, UnblockS) {
1544 us_updateSharerIfFBD;
1545 mu_decrementNumberOfUnblocks;
1546 os_checkForMergedGetSCompletion;
1547 j_popIncomingUnblockQueue;
1548 }
1549
1550 transition(NO_B_S_W, All_Unblocks, NX) {
1551 w_deallocateTBE;
1552 k_wakeUpDependents;
1553 g_popTriggerQueue;
1554 }
1555
1556 transition(O_B, UnblockS, O) {
1557 us_updateSharerIfFBD;
1558 k_wakeUpDependents;
1559 j_popIncomingUnblockQueue;
1560 }
1561
1562 transition(O_B, UnblockM, NO) {
1563 us_updateSharerIfFBD;
1564 uo_updateOwnerIfPf;
1565 k_wakeUpDependents;
1566 j_popIncomingUnblockQueue;
1567 }
1568
1569 transition(NO_B_W, Memory_Data, NO_B) {
1570 d_sendData;
1571 w_deallocateTBE;
1572 l_popMemQueue;
1573 }
1574
1575 transition(NO_F_W, Memory_Data, NO_F) {
1576 d_sendData;
1577 w_deallocateTBE;
1578 l_popMemQueue;
1579 }
1580
1581 transition(NO_DR_B_W, Memory_Data, NO_DR_B) {
1582 r_recordMemoryData;
1583 o_checkForCompletion;
1584 l_popMemQueue;
1585 }
1586
1587 transition(O_DR_B_W, Memory_Data, O_DR_B) {
1588 r_recordMemoryData;
1589 dr_sendDmaData;
1590 o_checkForCompletion;
1591 l_popMemQueue;
1592 }
1593
1594 transition({NO_DR_B, O_DR_B, NO_DR_B_D, NO_DW_B_W}, Ack) {
1595 m_decrementNumberOfMessages;
1596 o_checkForCompletion;
1597 n_popResponseQueue;
1598 }
1599
1600 transition({O_R, S_R, NO_R}, Ack) {
1601 m_decrementNumberOfMessages;
1602 o_checkForCompletion;
1603 n_popResponseQueue;
1604 }
1605
1606 transition(S_R, Data) {
1607 wr_writeResponseDataToMemory;
1608 m_decrementNumberOfMessages;
1609 o_checkForCompletion;
1610 n_popResponseQueue;
1611 }
1612
1613 transition(NO_R, {Data, Exclusive_Data}) {
1614 wr_writeResponseDataToMemory;
1615 m_decrementNumberOfMessages;
1616 o_checkForCompletion;
1617 n_popResponseQueue;
1618 }
1619
  transition({O_R, S_R, NO_R}, All_acks_and_data_no_sharers, E) {
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition({NO_DR_B_W, O_DR_B_W}, Ack) {
    m_decrementNumberOfMessages;
    n_popResponseQueue;
  }

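  //
  // Shared_Ack and Shared_Data responses also record the responder in the
  // sharer (or owner) bits so the final directory state reflects which
  // caches still hold the block after the DMA read.
  //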
  transition(NO_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
  }

  transition(O_DR_B, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(O_DR_B_W, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D}, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D}, Shared_Data) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    so_setOwnerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_DR_B_W, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    n_popResponseQueue;
  }

  transition({NO_DR_B, NO_DR_B_D, NO_DW_B_W}, {Exclusive_Data, Data}) {
    r_recordCacheData;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(NO_DR_B, All_acks_and_owner_data, O) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B, All_acks_and_shared_data, S) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_owner_data, O) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_shared_data, S) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_DR_B, All_acks_and_owner_data, O) {
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(O_DR_B, All_acks_and_data_no_sharers, E) {
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B, All_acks_and_data_no_sharers, E) {
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

  transition(NO_DR_B_D, All_acks_and_data_no_sharers, E) {
    a_assertCacheData;
    //
    // Note that the DMA consistency model allows us to send the DMA device
    // a response as soon as we receive valid data and prior to receiving
    // all acks. However, to simplify the protocol we wait for all acks.
    //
    dt_sendDmaDataFromTbe;
    wdt_writeDataFromTBE;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    g_popTriggerQueue;
  }

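  //
  // DMA write completion: once all acks and data are collected, the DMA
  // write data is merged into the TBE copy and queued to memory; the
  // Memory_Ack then completes the transaction back to the DMA device.
  //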
  transition(NO_DW_B_W, All_acks_and_data_no_sharers, NO_DW_W) {
    dwt_writeDmaDataFromTBE;
    ld_queueMemoryDmaWrite;
    g_popTriggerQueue;
  }

  transition(NO_DW_W, Memory_Ack, E) {
    da_sendDmaAck;
    w_deallocateTBE;
    ppfd_possibleProbeFilterDeallocate;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(O_B_W, Memory_Data, O_B) {
    d_sendData;
    w_deallocateTBE;
    l_popMemQueue;
  }

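  //
  // An unblock may arrive before the memory response; NO_W and O_W absorb
  // the late Memory_Data before the block settles into its base state.
  //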
  transition(NO_B_W, UnblockM, NO_W) {
    uo_updateOwnerIfPf;
    j_popIncomingUnblockQueue;
  }

  transition(NO_B_W, UnblockS, NO_W) {
    us_updateSharerIfFBD;
    j_popIncomingUnblockQueue;
  }

  transition(O_B_W, UnblockS, O_W) {
    us_updateSharerIfFBD;
    j_popIncomingUnblockQueue;
  }

  transition(NO_W, Memory_Data, NO) {
    w_deallocateTBE;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(O_W, Memory_Data, O) {
    w_deallocateTBE;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  // WB State Transitions
  transition(WB, Writeback_Dirty, WB_O_W) {
    l_writeDataToMemory;
    rs_removeSharer;
    l_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Writeback_Exclusive_Dirty, WB_E_W) {
    l_writeDataToMemory;
    rs_removeSharer;
    l_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(WB_E_W, Memory_Ack, E) {
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(WB_O_W, Memory_Ack, O) {
    k_wakeUpDependents;
    l_popMemQueue;
  }

  transition(WB, Writeback_Clean, O) {
    ll_checkIncomingWriteback;
    rs_removeSharer;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Writeback_Exclusive_Clean, E) {
    ll_checkIncomingWriteback;
    rs_removeSharer;
    pfd_probeFilterDeallocate;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

  transition(WB, Unblock, NX) {
    auno_assertUnblockerNotOwner;
    k_wakeUpDependents;
    j_popIncomingUnblockQueue;
  }

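  //
  // Flush handling: after the GETF data response, NO_F waits for the
  // flusher's PUTF and acknowledges it to begin a normal writeback.
  //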
  transition(NO_F, PUTF, WB) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // Possible race between GETF and UnblockM -- it is unclear whether this
  // transition is still needed.
  transition(NO_F, UnblockM) {
    us_updateSharerIfFBD;
    uo_updateOwnerIfPf;
    j_popIncomingUnblockQueue;
  }
}