3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Directory controller for a MOESI CMP directory protocol (SLICC machine).
// Parameters: the backing DirectoryMemory, the off-chip MemoryControl queue,
// and the latency applied to forwarded requests.
// NOTE(review): this view of the file is line-sampled — blank lines, brace-only
// lines, and some statements are missing between the visible lines.
34 machine(Directory, "Directory protocol")
35 : DirectoryMemory * directory,
36 MemoryControl * memBuffer,
37 int directory_latency = 6
// Incoming virtual networks (an L2 bank -> this directory), one per vnet.
41 MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
42 MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
43 MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
// Outgoing virtual networks (this directory -> L2 banks), mirrored per vnet.
45 MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
46 MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
47 MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
// Directory states. Stable states I/S/O/M, plus transient states used while
// waiting for unblocks, writeback data, memory responses, or DMA acks.
// Each state carries the access permission Ruby reports for the block.
51 state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Stable states.
53 I, AccessPermission:Invalid, desc="Invalid";
54 S, AccessPermission:Read_Only, desc="Shared";
55 O, AccessPermission:Read_Only, desc="Owner";
56 M, AccessPermission:Read_Write, desc="Modified";
// Transient states entered from a stable state while a request is in flight.
58 IS, AccessPermission:Busy, desc="Blocked, was in idle";
59 SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
60 OO, AccessPermission:Read_Only, desc="Blocked, was in owned";
61 MO, AccessPermission:Read_Only, desc="Blocked, going to owner or maybe modified";
62 MM, AccessPermission:Read_Only, desc="Blocked, going to modified";
63 MM_DMA, AccessPermission:Busy, desc="Blocked, going to I";
// Transient states for in-flight writebacks.
65 MI, AccessPermission:Busy, desc="Blocked on a writeback";
66 MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
67 OS, AccessPermission:Busy, desc="Blocked on a writeback";
68 OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
// Transient states used by the DMA read/write flows.
70 XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
71 XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
72 OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";
74 OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
75 MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
// Directory events. Triggered by the in_ports below from incoming request,
// response/unblock, and memory-queue messages.
79 enumeration(Event, desc="Directory events") {
80 GETX, desc="A GETX arrives";
81 GETS, desc="A GETS arrives";
82 PUTX, desc="A PUTX arrives";
83 PUTO, desc="A PUTO arrives";
84 PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
85 Unblock, desc="An unblock message arrives";
86 Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
87 Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
88 Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
89 Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
90 Memory_Data, desc="Fetched data from memory arrives";
91 Memory_Ack, desc="Writeback Ack from memory arrives";
92 DMA_READ, desc="DMA Read";
93 DMA_WRITE, desc="DMA Write";
94 DMA_ACK, desc="DMA Ack";
95 Data, desc="Data to directory";
// Per-block directory entry: state, data copy, sharer/owner sets, and the
// count of unblock messages still outstanding for this block.
101 structure(Entry, desc="...", interface='AbstractEntry') {
102 State DirectoryState, desc="Directory state";
103 DataBlock DataBlk, desc="data for the block";
104 NetDest Sharers, desc="Sharers for this block";
105 NetDest Owner, desc="Owner of this block";
106 int WaitingUnblocks, desc="Number of acks we're waiting for";
// Transaction buffer entry: stashes DMA request details (address, length,
// data, requestor) so later responses can be routed back (see v_allocateTBE).
109 structure(TBE, desc="...") {
110 Address PhysicalAddress, desc="Physical address for this entry";
111 int Len, desc="Length of request";
112 DataBlock DataBlk, desc="DataBlk";
113 MachineID Requestor, desc="original requestor";
// TBE table interface, implemented externally (C++ side).
116 structure(TBETable, external = "yes") {
118 void allocate(Address);
119 void deallocate(Address);
120 bool isPresent(Address);
// The table of in-flight transactions for this controller.
124 TBETable TBEs, template_hack="<Directory_TBE>";
// Look up (by reference) the directory entry for a block address.
129 Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
130 return static_cast(Entry, directory[addr]);
// Current state of a block (the tbe argument is unused here).
133 State getState(TBE tbe, Address addr) {
134 return getDirectoryEntry(addr).DirectoryState;
// Set a block's state, sanity-checking the sharer/owner invariants that each
// target stable state implies before writing it.
137 void setState(TBE tbe, Address addr, State state) {
138 if (directory.isPresent(addr)) {
// I: nobody may hold the block.
140 if (state == State:I) {
141 assert(getDirectoryEntry(addr).Owner.count() == 0);
142 assert(getDirectoryEntry(addr).Sharers.count() == 0);
// S: sharers allowed, but no owner.
145 if (state == State:S) {
146 assert(getDirectoryEntry(addr).Owner.count() == 0);
// O: exactly one owner, and the owner must not also be in the sharer set.
149 if (state == State:O) {
150 assert(getDirectoryEntry(addr).Owner.count() == 1);
151 assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
// M: exactly one owner and no sharers.
154 if (state == State:M) {
155 assert(getDirectoryEntry(addr).Owner.count() == 1);
156 assert(getDirectoryEntry(addr).Sharers.count() == 0);
// Only SS/OO may have unblocks outstanding (see n_/o_ actions below).
159 if ((state != State:SS) && (state != State:OO)) {
160 assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
// Transition into I from a non-I state.
163 if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
164 getDirectoryEntry(addr).DirectoryState := state;
165 // disable coherence checker
166 // sequencer.checkCoherence(addr);
169 getDirectoryEntry(addr).DirectoryState := state;
174 // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
// NOTE(review): the bodies of both predicates below are truncated in this
// view — the return statements after the State:I check are not visible.
// Confirm against the full source before relying on their results.
175 bool isBlockShared(Address addr) {
176 if (directory.isPresent(addr)) {
177 if (getDirectoryEntry(addr).DirectoryState == State:I) {
184 bool isBlockExclusive(Address addr) {
185 if (directory.isPresent(addr)) {
186 if (getDirectoryEntry(addr).DirectoryState == State:I) {
// Outgoing ports: forwarded requests, responses, vnet-0 responses, and the
// queue to the off-chip memory controller.
195 out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
196 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
197 // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
198 out_port(goo1_out, ResponseMsg, goo1);
199 out_port(memQueue_out, MemoryMsg, memBuffer);
// vnet-0 input port; its body is not visible in this view of the file.
203 in_port(foo1_in, ResponseMsg, foo1) {
207 // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
208 // if (unblockNetwork_in.isReady()) {
// Response/unblock input port: maps each incoming ResponseMsg type to the
// corresponding directory event, passing along any TBE for the address.
209 in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
210 if (unblockNetwork_in.isReady()) {
211 peek(unblockNetwork_in, ResponseMsg) {
212 if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
// Last_Unblock when this is the final unblock we are waiting on.
213 if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
214 trigger(Event:Last_Unblock, in_msg.Address,
215 TBEs[in_msg.Address]);
217 trigger(Event:Unblock, in_msg.Address,
218 TBEs[in_msg.Address]);
220 } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
221 trigger(Event:Exclusive_Unblock, in_msg.Address,
222 TBEs[in_msg.Address]);
223 } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
224 trigger(Event:Dirty_Writeback, in_msg.Address,
225 TBEs[in_msg.Address]);
226 } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
227 trigger(Event:Clean_Writeback, in_msg.Address,
228 TBEs[in_msg.Address]);
229 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
230 trigger(Event:Data, in_msg.Address,
231 TBEs[in_msg.Address]);
232 } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
233 trigger(Event:DMA_ACK, in_msg.Address,
234 TBEs[in_msg.Address]);
236 error("Invalid message");
// Request input port: maps coherence request types to events. DMA requests
// may be sub-line, so they are aligned to the line address first.
242 in_port(requestQueue_in, RequestMsg, requestToDir) {
243 if (requestQueue_in.isReady()) {
244 peek(requestQueue_in, RequestMsg) {
245 if (in_msg.Type == CoherenceRequestType:GETS) {
246 trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
247 } else if (in_msg.Type == CoherenceRequestType:GETX) {
248 trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
249 } else if (in_msg.Type == CoherenceRequestType:PUTX) {
250 trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
251 } else if (in_msg.Type == CoherenceRequestType:PUTO) {
252 trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
253 } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
254 trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
255 } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
// Line-align DMA addresses before triggering.
256 trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
257 TBEs[makeLineAddress(in_msg.Address)]);
258 } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
259 trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
260 TBEs[makeLineAddress(in_msg.Address)]);
262 error("Invalid message");
268 // off-chip memory request/response is done
// Memory controller input port: read completions become Memory_Data,
// writeback completions become Memory_Ack.
269 in_port(memQueue_in, MemoryMsg, memBuffer) {
270 if (memQueue_in.isReady()) {
271 peek(memQueue_in, MemoryMsg) {
272 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
273 trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
274 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
275 trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
277 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
278 error("Invalid message");
// Acknowledge a writeback: send WB_ACK back to the requestor on the
// forward network after directory_latency cycles.
286 action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
287 peek(requestQueue_in, RequestMsg) {
288 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
289 out_msg.Address := address;
290 out_msg.Type := CoherenceRequestType:WB_ACK;
291 out_msg.Requestor := in_msg.Requestor;
292 out_msg.RequestorMachine := MachineType:Directory;
293 out_msg.Destination.add(in_msg.Requestor);
294 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Reject a writeback: same shape as above but with WB_NACK.
299 action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
300 peek(requestQueue_in, RequestMsg) {
301 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
302 out_msg.Address := address;
303 out_msg.Type := CoherenceRequestType:WB_NACK;
304 out_msg.Requestor := in_msg.Requestor;
305 out_msg.RequestorMachine := MachineType:Directory;
306 out_msg.Destination.add(in_msg.Requestor);
307 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Drop the recorded owner for this block.
312 action(c_clearOwner, "c", desc="Clear the owner field") {
313 getDirectoryEntry(address).Owner.clear();
// Demote the owner to a sharer (M/O downgrade paths).
316 action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
317 getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
318 getDirectoryEntry(address).Owner.clear();
// Drop all recorded sharers for this block.
321 action(cc_clearSharers, "\c", desc="Clear the sharers field") {
322 getDirectoryEntry(address).Sharers.clear();
// Forward data returned by memory to the original requestor. The response
// type depends on a condition elided between lines 335-339 of the original
// (DATA_EXCLUSIVE vs DATA); ack count is passed through from the memory msg.
325 action(d_sendDataMsg, "d", desc="Send data to requestor") {
326 peek(memQueue_in, MemoryMsg) {
327 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
328 out_msg.Address := address;
329 out_msg.Sender := machineID;
330 out_msg.SenderMachine := MachineType:Directory;
331 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
332 //out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
333 out_msg.DataBlk := in_msg.DataBlk;
334 out_msg.Dirty := false; // By definition, the block is now clean
335 out_msg.Acks := in_msg.Acks;
337 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
339 out_msg.Type := CoherenceResponseType:DATA;
341 out_msg.MessageSize := MessageSizeType:Response_Data;
// Send the directory's copy of the block straight to a DMA requestor.
346 action(p_fwdDataToDMA, "\d", desc="Send data to requestor") {
347 peek(requestQueue_in, RequestMsg) {
348 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
349 out_msg.Address := address;
350 out_msg.Sender := machineID;
351 out_msg.SenderMachine := MachineType:Directory;
352 out_msg.Destination.add(in_msg.Requestor);
353 out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
354 out_msg.Dirty := false; // By definition, the block is now clean
355 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
356 out_msg.MessageSize := MessageSizeType:Response_Data;
// Record the sender of an exclusive unblock as the new (sole) owner.
363 action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
364 peek(unblockNetwork_in, ResponseMsg) {
365 getDirectoryEntry(address).Owner.clear();
366 getDirectoryEntry(address).Owner.add(in_msg.Sender);
// Forward the incoming request to the current owner, telling the requestor
// how many invalidation acks to expect (sharer count, excluding itself).
370 action(f_forwardRequest, "f", desc="Forward request to owner") {
371 peek(requestQueue_in, RequestMsg) {
372 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
373 out_msg.Address := address;
374 out_msg.Type := in_msg.Type;
375 out_msg.Requestor := in_msg.Requestor;
376 out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
377 out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
378 out_msg.Acks := getDirectoryEntry(address).Sharers.count();
// Don't count the requestor itself as an expected ack.
379 if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
380 out_msg.Acks := out_msg.Acks - 1;
382 out_msg.MessageSize := MessageSizeType:Forwarded_Control;
// Same as above, but the directory itself poses as the requestor (used on
// the DMA_WRITE path so the owner's data comes back here).
387 action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
388 peek(requestQueue_in, RequestMsg) {
389 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
390 out_msg.Address := address;
391 out_msg.Type := in_msg.Type;
392 out_msg.Requestor := machineID;
393 out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
394 out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
395 out_msg.Acks := getDirectoryEntry(address).Sharers.count();
396 if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
397 out_msg.Acks := out_msg.Acks - 1;
399 out_msg.MessageSize := MessageSizeType:Forwarded_Control;
// Send INV to every sharer except the requestor; skipped entirely when the
// requestor is the only sharer (or there are no sharers).
404 action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
405 peek(requestQueue_in, RequestMsg) {
406 if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
407 ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) && (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
408 enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
409 out_msg.Address := address;
410 out_msg.Type := CoherenceRequestType:INV;
411 out_msg.Requestor := in_msg.Requestor;
412 out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
413 // out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
414 out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
415 out_msg.Destination.remove(in_msg.Requestor);
416 out_msg.MessageSize := MessageSizeType:Invalidate_Control;
// Queue pops.
422 action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
423 requestQueue_in.dequeue();
426 action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
427 unblockNetwork_in.dequeue();
// Store dirty writeback data into the directory's copy of the block.
430 action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
431 peek(unblockNetwork_in, ResponseMsg) {
432 assert(in_msg.Dirty);
433 assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
434 getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
435 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
436 in_msg.Address, in_msg.DataBlk);
// Store data from a forwarded response (no dirty assertion here).
440 action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
441 peek(unblockNetwork_in, ResponseMsg) {
442 getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
443 DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
444 in_msg.Address, in_msg.DataBlk);
// Debug-only: verify a clean writeback's data matches the directory's copy.
448 action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
449 peek(unblockNetwork_in, ResponseMsg) {
450 assert(in_msg.Dirty == false);
451 assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
453 // NOTE: The following check would not be valid in a real
454 // implementation. We include the data in the "dataless"
455 // message so we can assert the clean data matches the datablock
457 assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
// Add the unblock sender to the sharer set.
461 action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
462 peek(unblockNetwork_in, ResponseMsg) {
463 getDirectoryEntry(address).Sharers.add(in_msg.Sender);
// Bookkeeping for the WaitingUnblocks counter (used by SS/OO states).
467 action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
468 getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
471 action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
472 getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
473 assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
476 action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
477 memQueue_in.dequeue();
// Issue an off-chip read; ReadX/Acks ride along and come back with the data
// so d_sendDataMsg can choose the response type and ack count.
480 action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
481 peek(requestQueue_in, RequestMsg) {
482 enqueue(memQueue_out, MemoryMsg, latency="1") {
483 out_msg.Address := address;
484 out_msg.Type := MemoryRequestType:MEMORY_READ;
485 out_msg.Sender := machineID;
486 out_msg.OriginalRequestorMachId := in_msg.Requestor;
487 out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
488 out_msg.MessageSize := in_msg.MessageSize;
489 //out_msg.Prefetch := false;
490 // These are not used by memory but are passed back here with the read data:
491 out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
492 out_msg.Acks := getDirectoryEntry(address).Sharers.count();
493 if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
494 out_msg.Acks := out_msg.Acks - 1;
496 DPRINTF(RubySlicc, "%s\n", out_msg);
// Issue an off-chip writeback using data peeked from the unblock network;
// the original requestor is recovered from the TBE.
501 action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
502 peek(unblockNetwork_in, ResponseMsg) {
503 enqueue(memQueue_out, MemoryMsg, latency="1") {
504 out_msg.Address := address;
505 out_msg.Type := MemoryRequestType:MEMORY_WB;
506 out_msg.Sender := machineID;
508 out_msg.OriginalRequestorMachId := tbe.Requestor;
510 out_msg.DataBlk := in_msg.DataBlk;
511 out_msg.MessageSize := in_msg.MessageSize;
512 //out_msg.Prefetch := false;
514 out_msg.ReadX := false;
515 out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
516 DPRINTF(RubySlicc, "%s\n", out_msg);
// Variant peeking the request queue instead (DMA_WRITE from a stable state).
521 action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
522 peek(requestQueue_in, RequestMsg) {
523 enqueue(memQueue_out, MemoryMsg, latency="1") {
524 out_msg.Address := address;
525 out_msg.Type := MemoryRequestType:MEMORY_WB;
526 out_msg.Sender := machineID;
527 out_msg.OriginalRequestorMachId := in_msg.Requestor;
528 out_msg.DataBlk := in_msg.DataBlk;
529 out_msg.MessageSize := in_msg.MessageSize;
530 //out_msg.Prefetch := false;
532 out_msg.ReadX := false;
533 out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
534 DPRINTF(RubySlicc, "%s\n", out_msg);
540 // action(z_stall, "z", desc="Cannot be handled right now.") {
541 // Special name recognized as do nothing case
// Re-enqueue the head request so it is retried later (used while blocked).
544 action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
545 requestQueue_in.recycle();
// Ack a DMA write directly from the request queue, reporting how many
// invalidation acks the DMA controller should expect.
548 action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
549 peek(requestQueue_in, RequestMsg) {
550 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
551 out_msg.Address := address;
552 out_msg.Sender := machineID;
553 out_msg.SenderMachine := MachineType:Directory;
554 out_msg.Destination.add(in_msg.Requestor);
555 out_msg.DataBlk := in_msg.DataBlk;
556 out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
557 out_msg.Type := CoherenceResponseType:DMA_ACK;
558 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Same ack, but triggered off an unblock-network message; the DMA requestor
// is recovered from the TBE (see v_allocateTBE).
563 action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
564 peek(unblockNetwork_in, ResponseMsg) {
565 enqueue(responseNetwork_out, ResponseMsg, latency="1") {
566 out_msg.Address := address;
567 out_msg.Sender := machineID;
568 out_msg.SenderMachine := MachineType:Directory;
570 out_msg.Destination.add(tbe.Requestor);
572 out_msg.DataBlk := in_msg.DataBlk;
573 out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
574 out_msg.Type := CoherenceResponseType:DMA_ACK;
575 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Merge a (possibly sub-line) DMA write into the directory's data block,
// using the request's offset and length.
580 action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
581 peek(requestQueue_in, RequestMsg) {
582 getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
// Same merge, but sourced from the TBE (used after owner data has arrived).
586 action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
587 assert(is_valid(tbe));
588 getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
589 addressOffset(tbe.PhysicalAddress), tbe.Len);
// Capture the in-flight request's address/length/data/requestor in a TBE.
592 action(v_allocateTBE, "v", desc="Allocate TBE entry") {
593 peek (requestQueue_in, RequestMsg) {
594 TBEs.allocate(address);
595 set_tbe(TBEs[address]);
596 tbe.PhysicalAddress := in_msg.Address;
597 tbe.Len := in_msg.Len;
598 tbe.DataBlk := in_msg.DataBlk;
599 tbe.Requestor := in_msg.Requestor;
// Release the TBE once the transaction completes.
603 action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
604 TBEs.deallocate(address);
// TRANSITIONS from I and the DMA-to-I transient states.
// NOTE(review): some action lines appear elided throughout the transitions in
// this view (e.g. no mem-queue pop is visible under XI_M/Memory_Data and no
// owner/sharer clears under XI_U/Exclusive_Unblock) — verify against the
// full source before editing.
612 transition(I, GETX, MM) {
613 qf_queueMemoryFetchRequest;
614 i_popIncomingRequestQueue;
// DMA read of an uncached block: fetch from memory, reply from XI_M.
617 transition(I, DMA_READ, XI_M) {
618 qf_queueMemoryFetchRequest;
619 i_popIncomingRequestQueue;
// DMA write of an uncached block: write through and ack immediately.
622 transition(I, DMA_WRITE, XI_U) {
623 qw_queueMemoryWBRequest2;
624 a_sendDMAAck; // ack count may be zero
625 l_writeDMADataToMemory;
626 i_popIncomingRequestQueue;
629 transition(XI_M, Memory_Data, I) {
630 d_sendDataMsg; // ack count may be zero
634 transition(XI_U, Exclusive_Unblock, I) {
637 j_popIncomingUnblockQueue;
// TRANSITIONS from S (and I for GETS), plus stale PUT handling.
640 transition(S, GETX, MM) {
641 qf_queueMemoryFetchRequest;
643 i_popIncomingRequestQueue;
// DMA read while shared: memory is up to date; actions mostly disabled here.
646 transition(S, DMA_READ) {
647 //qf_queueMemoryFetchRequest;
649 //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
650 i_popIncomingRequestQueue;
// DMA write while shared: write through, ack, and invalidate sharers.
653 transition(S, DMA_WRITE, XI_U) {
654 qw_queueMemoryWBRequest2;
655 a_sendDMAAck; // ack count may be zero
656 l_writeDMADataToMemory;
657 g_sendInvalidations; // the DMA will collect invalidations
658 i_popIncomingRequestQueue;
661 transition(I, GETS, IS) {
662 qf_queueMemoryFetchRequest;
663 i_popIncomingRequestQueue;
// Additional GETS while shared/blocked-shared: count another unblock.
666 transition({S, SS}, GETS, SS) {
667 qf_queueMemoryFetchRequest;
668 n_incrementOutstanding;
669 i_popIncomingRequestQueue;
// Stale PUTs (the putter no longer holds the block per directory state).
672 transition({I, S}, PUTO) {
674 i_popIncomingRequestQueue;
677 transition({I, S, O}, PUTX) {
679 i_popIncomingRequestQueue;
// TRANSITIONS from O and its DMA transient states.
682 transition(O, GETX, MM) {
685 i_popIncomingRequestQueue;
// DMA read while owned: owner supplies the data directly to the DMA.
688 transition(O, DMA_READ, OD) {
689 f_forwardRequest; // this will cause the data to go to DMA directly
690 //g_sendInvalidations; // this will cause acks to be sent to the DMA
691 i_popIncomingRequestQueue;
694 transition(OD, DMA_ACK, O) {
695 j_popIncomingUnblockQueue;
// DMA write while owned/modified: pull the owner's data back to the
// directory first (directory poses as requestor), invalidate sharers.
698 transition({O,M}, DMA_WRITE, OI_D) {
699 f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
700 g_sendInvalidations; // these go to the DMA Controller
702 i_popIncomingRequestQueue;
// Owner data arrived: write back, merge the DMA data, and ack the DMA.
705 transition(OI_D, Data, XI_U) {
706 qw_queueMemoryWBRequest;
707 a_sendDMAAck2; // ack count may be zero
708 p_writeFwdDataToMemory;
709 l_writeDMADataToMemoryFromTBE;
711 j_popIncomingUnblockQueue;
// GETS while owned: another reader joins; count its unblock.
714 transition({O, OO}, GETS, OO) {
716 n_incrementOutstanding;
717 i_popIncomingRequestQueue;
// TRANSITIONS from M and its writeback/DMA paths.
720 transition(M, GETX, MM) {
722 i_popIncomingRequestQueue;
725 // no exclusive unblock will show up to the directory
726 transition(M, DMA_READ, MD) {
727 f_forwardRequest; // this will cause the data to go to DMA directly
728 i_popIncomingRequestQueue;
731 transition(MD, DMA_ACK, M) {
732 j_popIncomingUnblockQueue;
735 transition(M, GETS, MO) {
737 i_popIncomingRequestQueue;
// Owner writes the block back; wait in MI for the writeback message.
740 transition(M, PUTX, MI) {
742 i_popIncomingRequestQueue;
745 // happens if M->O transition happens on-chip
746 transition(M, PUTO, MI) {
748 i_popIncomingRequestQueue;
// PUTO keeping the putter as a sharer -> MIS (sharers preserved).
751 transition(M, PUTO_SHARERS, MIS) {
753 i_popIncomingRequestQueue;
756 transition(O, PUTO, OS) {
758 i_popIncomingRequestQueue;
761 transition(O, PUTO_SHARERS, OSS) {
763 i_popIncomingRequestQueue;
// While blocked, incoming requests are recycled (action lines elided in this
// view; the recycle action body is not visible under these transitions).
767 transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
// Exclusive unblock resolves the blocked state to M.
771 transition({MM, MO}, Exclusive_Unblock, M) {
774 j_popIncomingUnblockQueue;
// Plain unblock after a GETS on M: requester becomes a sharer, owner stays.
777 transition(MO, Unblock, O) {
778 m_addUnlockerToSharers;
779 j_popIncomingUnblockQueue;
782 transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
786 transition(IS, GETS) {
790 transition(IS, Unblock, S) {
791 m_addUnlockerToSharers;
792 j_popIncomingUnblockQueue;
795 transition(IS, Exclusive_Unblock, M) {
798 j_popIncomingUnblockQueue;
// SS/OO: drain outstanding unblocks one at a time; the last one returns the
// block to its stable state (S or O).
801 transition(SS, Unblock) {
802 m_addUnlockerToSharers;
803 o_decrementOutstanding;
804 j_popIncomingUnblockQueue;
807 transition(SS, Last_Unblock, S) {
808 m_addUnlockerToSharers;
809 o_decrementOutstanding;
810 j_popIncomingUnblockQueue;
813 transition(OO, Unblock) {
814 m_addUnlockerToSharers;
815 o_decrementOutstanding;
816 j_popIncomingUnblockQueue;
819 transition(OO, Last_Unblock, O) {
820 m_addUnlockerToSharers;
821 o_decrementOutstanding;
822 j_popIncomingUnblockQueue;
// Writeback completion transitions: dirty writebacks go to memory; clean
// writebacks only sanity-check data. MIS/OSS land in S (sharers kept),
// MI lands in I, OS in S. Unblock in these states means the PUT raced a
// forwarded request and the block stays with the unblocker.
825 transition(MI, Dirty_Writeback, I) {
829 qw_queueMemoryWBRequest;
830 j_popIncomingUnblockQueue;
833 transition(MIS, Dirty_Writeback, S) {
836 qw_queueMemoryWBRequest;
837 j_popIncomingUnblockQueue;
840 transition(MIS, Clean_Writeback, S) {
842 j_popIncomingUnblockQueue;
845 transition(OS, Dirty_Writeback, S) {
848 qw_queueMemoryWBRequest;
849 j_popIncomingUnblockQueue;
852 transition(OSS, Dirty_Writeback, S) {
855 qw_queueMemoryWBRequest;
856 j_popIncomingUnblockQueue;
859 transition(OSS, Clean_Writeback, S) {
861 j_popIncomingUnblockQueue;
864 transition(MI, Clean_Writeback, I) {
867 ll_checkDataInMemory;
868 j_popIncomingUnblockQueue;
871 transition(OS, Clean_Writeback, S) {
873 ll_checkDataInMemory;
874 j_popIncomingUnblockQueue;
// Writeback raced with a forward: the unblocker keeps the block.
877 transition({MI, MIS}, Unblock, M) {
878 j_popIncomingUnblockQueue;
881 transition({OS, OSS}, Unblock, O) {
882 j_popIncomingUnblockQueue;
// Late memory responses arriving in arbitrary states (bodies not visible in
// this view — presumably just popped; confirm against the full source).
885 transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
890 transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {