MOESI_hammer: Added full-bit directory support
[gem5.git] / src / mem / protocol / MOESI_CMP_directory-dir.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 */
33
/*
 * Directory controller for the MOESI_CMP_directory protocol.  Tracks the
 * owner and sharer set for every block it maps, forwards requests to the
 * current owner, and fronts the off-chip memory controller (memBuffer)
 * for fetches and writebacks.
 */
machine(Directory, "Directory protocol")
: DirectoryMemory * directory,   // per-block directory entries (state, data, sharers, owner)
  MemoryControl * memBuffer,     // interface to the off-chip memory controller
  int directory_latency = 6      // enqueue latency for forwarded requests and wb acks/nacks
{

// ** IN QUEUES **
// NOTE(review): foo1/goo1 occupy virtual network 0 but appear unused in this
// file -- foo1_in below has an empty body and nothing enqueues to goo1.
MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir

MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank
49
50 // STATES
// STATES
enumeration(State, desc="Directory states", default="Directory_State_I") {
  // Stable base states
  I, desc="Invalid";
  S, desc="Shared";
  O, desc="Owner";
  M, desc="Modified";

  // Transient states blocked on (exclusive) unblock messages from caches
  IS, desc="Blocked, was in idle";
  SS, desc="Blocked, was in shared";
  OO, desc="Blocked, was in owned";
  MO, desc="Blocked, going to owner or maybe modified";
  MM, desc="Blocked, going to modified";
  MM_DMA, desc="Blocked, going to I";  // NOTE(review): no transition in this file uses MM_DMA

  // Transient states blocked on writeback data / clean-writeback ack
  MI, desc="Blocked on a writeback";
  MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
  OS, desc="Blocked on a writeback";
  OSS, desc="Blocked on a writeback, but don't remove from sharers when received";

  // DMA-induced transients on the way back to I
  XI_M, desc="In a stable state, going to I, waiting for the memory controller";
  XI_U, desc="In a stable state, going to I, waiting for an unblock";
  OI_D, desc="In O, going to I, waiting for data";
}
74
75 // Events
// Events
enumeration(Event, desc="Directory events") {
  // Cache-side requests
  GETX, desc="A GETX arrives";
  GETS, desc="A GETS arrives";
  PUTX, desc="A PUTX arrives";
  PUTO, desc="A PUTO arrives";
  PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
  // Unblock / writeback responses
  Unblock, desc="An unblock message arrives";
  Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
  Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
  Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
  Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
  // Memory controller responses
  Memory_Data, desc="Fetched data from memory arrives";
  Memory_Ack, desc="Writeback Ack from memory arrives";
  // DMA traffic
  DMA_READ, desc="DMA Read";
  DMA_WRITE, desc="DMA Write";
  Data, desc="Data to directory";  // owner's data pulled back during a DMA write (OI_D)
}
93
// TYPES

// DirectoryEntry -- one per memory block owned by this directory.
structure(Entry, desc="...", interface='AbstractEntry') {
  State DirectoryState, desc="Directory state";
  DataBlock DataBlk, desc="data for the block";
  NetDest Sharers, desc="Sharers for this block";
  NetDest Owner, desc="Owner of this block";
  // Counts outstanding GETS unblocks in the SS/OO states; must be zero in
  // every other state (asserted in setState).
  int WaitingUnblocks, desc="Number of acks we're waiting for";
}
104
// TBE -- allocated only for DMA writes that hit a dirty owner ({O,M} ->
// OI_D); holds the DMA payload until the owner's data comes back.
structure(TBE, desc="...") {
  Address PhysicalAddress, desc="Physical address for this entry";  // may be sub-line (DMA)
  int Len, desc="Length of request";
  DataBlock DataBlk, desc="DataBlk";
  MachineID Requestor, desc="original requestor";  // the DMA controller to ack
}
111
// Interface to the C++-implemented TBE table.
external_type(TBETable) {
  TBE lookup(Address);
  void allocate(Address);
  void deallocate(Address);
  bool isPresent(Address);
}
118
// ** OBJECTS **
TBETable TBEs, template_hack="<Directory_TBE>";

// Bind/unbind the implicit 'tbe' variable used inside actions.
void set_tbe(TBE b);
void unset_tbe();
124
// Look up (and downcast) the directory entry for addr.
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
  return static_cast(Entry, directory[addr]);
}
128
// The directory keeps its state in the entry itself; the TBE parameter is
// unused here (required by the SLICC getState signature).
State getState(TBE tbe, Address addr) {
  return getDirectoryEntry(addr).DirectoryState;
}
132
// Install a new directory state for addr, sanity-checking the owner/sharer
// sets against the state being entered.  No-op if addr is not mapped here.
void setState(TBE tbe, Address addr, State state) {
  if (directory.isPresent(addr)) {

    if (state == State:I) {
      assert(getDirectoryEntry(addr).Owner.count() == 0);
      assert(getDirectoryEntry(addr).Sharers.count() == 0);
    }

    if (state == State:S) {
      assert(getDirectoryEntry(addr).Owner.count() == 0);
    }

    if (state == State:O) {
      assert(getDirectoryEntry(addr).Owner.count() == 1);
      // The owner must not also appear in the sharer list.
      assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
    }

    if (state == State:M) {
      assert(getDirectoryEntry(addr).Owner.count() == 1);
      assert(getDirectoryEntry(addr).Sharers.count() == 0);
    }

    // Only SS/OO may carry outstanding unblock acks.
    if ((state != State:SS) && (state != State:OO)) {
      assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
    }

    // The original code special-cased transitions into I solely to host a
    // disabled coherence-checker call; both branches performed the same
    // assignment, so they are collapsed here.  The disabled hook was:
    //   if (old state != State:I && state == State:I)
    //     sequencer.checkCoherence(addr);
    getDirectoryEntry(addr).DirectoryState := state;
  }
}
169
// if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
bool isBlockShared(Address addr) {
  // Guard-clause form: unmapped addresses are never considered shared.
  if (directory.isPresent(addr) == false) {
    return false;
  }
  if (getDirectoryEntry(addr).DirectoryState == State:I) {
    return true;
  }
  return false;
}
179
// Mirror of isBlockShared: with no sharers recorded, the directory counts
// as the exclusive holder for coherence-checking purposes.
bool isBlockExclusive(Address addr) {
  if (directory.isPresent(addr) == false) {
    return false;
  }
  if (getDirectoryEntry(addr).DirectoryState == State:I) {
    return true;
  }
  return false;
}
188
189
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);     // fwd requests, invalidations, wb ack/nack
out_port(responseNetwork_out, ResponseMsg, responseFromDir);  // data responses and DMA acks
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);                        // never enqueued to in this file
out_port(memQueue_out, MemoryMsg, memBuffer);                 // requests to the memory controller
196
// ** IN_PORTS **

// Placeholder port for virtual network 0; body is empty, so messages on
// foo1 are never consumed here.
in_port(foo1_in, ResponseMsg, foo1) {

}
202
// Unblock, writeback, and data responses from the caches.  (The commented
// lines show this previously drained a dedicated unblockToDir queue.)
// in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
//   if (unblockNetwork_in.isReady()) {
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
  if (unblockNetwork_in.isReady()) {
    peek(unblockNetwork_in, ResponseMsg) {
      if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
        // Last_Unblock fires when this is the final unblock we wait on.
        if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
          trigger(Event:Last_Unblock, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else {
          trigger(Event:Unblock, in_msg.Address,
                  TBEs[in_msg.Address]);
        }
      } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
        trigger(Event:Exclusive_Unblock, in_msg.Address,
                TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
        trigger(Event:Dirty_Writeback, in_msg.Address,
                TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
        trigger(Event:Clean_Writeback, in_msg.Address,
                TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
        // Owner's modified data pulled back during a DMA write (OI_D).
        trigger(Event:Data, in_msg.Address,
                TBEs[in_msg.Address]);
      } else {
        error("Invalid message");
      }
    }
  }
}
234
// Cache-side requests (GETS/GETX/PUT*) and DMA requests.  DMA addresses
// may be sub-line, so they are normalized with makeLineAddress().
in_port(requestQueue_in, RequestMsg, requestToDir) {
  if (requestQueue_in.isReady()) {
    peek(requestQueue_in, RequestMsg) {
      if (in_msg.Type == CoherenceRequestType:GETS) {
        trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceRequestType:GETX) {
        trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceRequestType:PUTX) {
        trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceRequestType:PUTO) {
        trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
        trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
      } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
        trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                TBEs[makeLineAddress(in_msg.Address)]);
      } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
        trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                TBEs[makeLineAddress(in_msg.Address)]);
      } else {
        error("Invalid message");
      }
    }
  }
}
260
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, memBuffer) {
  if (memQueue_in.isReady()) {
    peek(memQueue_in, MemoryMsg) {
      if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
        trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
      } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
        trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
      } else {
        DPRINTF(RubySlicc, "%s\n", in_msg.Type);
        error("Invalid message");
      }
    }
  }
}
276
277 // Actions
278
// Acknowledge a PUTX/PUTO so the owner may proceed with its writeback.
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:WB_ACK;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.RequestorMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}
291
// Reject a writeback from a node that is not (or is no longer) the
// recorded owner -- e.g. a PUTX that raced with a forwarded GETX.
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:WB_NACK;
      out_msg.Requestor := in_msg.Requestor;
      // Fix: stamp the sending machine type, matching a_sendWriteBackAck;
      // this field was previously left unset on the nack path only.
      out_msg.RequestorMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}
303
action(c_clearOwner, "c", desc="Clear the owner field") {
  getDirectoryEntry(address).Owner.clear();
}

// Used when the owner downgrades but stays a sharer (PUTO_SHARERS paths).
action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
  getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
  getDirectoryEntry(address).Owner.clear();
}

action(cc_clearSharers, "\c", desc="Clear the sharers field") {
  getDirectoryEntry(address).Sharers.clear();
}
316
// Deliver memory data to the original requestor.  ReadX and Acks were
// computed when the fetch was queued (see qf_queueMemoryFetchRequest) and
// merely rode along through the memory controller.
action(d_sendDataMsg, "d", desc="Send data to requestor") {
  peek(memQueue_in, MemoryMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.OriginalRequestorMachId);
      //out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Dirty := false; // By definition, the block is now clean
      out_msg.Acks := in_msg.Acks;
      if (in_msg.ReadX) {
        // GETS with no sharers: grant the line exclusively.
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
      } else {
        out_msg.Type := CoherenceResponseType:DATA;
      }
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
}
337
// Reply to a DMA read directly from the directory's copy of the block.
// Only used from state S, where memory's copy is up to date; dirty-owner
// states forward the request to the owner instead.
// Fix: the desc previously duplicated d_sendDataMsg's ("Send data to
// requestor") even though this action serves DMA from the directory copy.
action(p_fwdDataToDMA, "\d", desc="Send directory's copy of the data to the DMA requestor") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
      out_msg.Dirty := false; // By definition, the block is now clean
      out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
}
352
353
354
// The node that sent the (exclusive) unblock becomes the sole owner.
action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
  peek(unblockNetwork_in, ResponseMsg) {
    getDirectoryEntry(address).Owner.clear();
    getDirectoryEntry(address).Owner.add(in_msg.Sender);
  }
}

// Forward the request to the current owner.  Acks tells the requestor how
// many invalidation acks to expect; it does not count itself if it already
// appears in the sharer list.
action(f_forwardRequest, "f", desc="Forward request to owner") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := in_msg.Type;
      out_msg.Requestor := in_msg.Requestor;
      out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
      out_msg.Acks := getDirectoryEntry(address).Sharers.count();
      if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
        out_msg.Acks := out_msg.Acks - 1;
      }
      out_msg.MessageSize := MessageSizeType:Forwarded_Control;
    }
  }
}

// Like f_forwardRequest, but the directory itself poses as the requestor;
// used to pull modified data back from the owner for a DMA write.
action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
      out_msg.Address := address;
      out_msg.Type := in_msg.Type;
      out_msg.Requestor := machineID;
      out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
      out_msg.Acks := getDirectoryEntry(address).Sharers.count();
      if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
        out_msg.Acks := out_msg.Acks - 1;
      }
      out_msg.MessageSize := MessageSizeType:Forwarded_Control;
    }
  }
}

// Invalidate every sharer except the requestor.  Skipped entirely when the
// requestor is the only sharer (or there are no sharers at all).
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
  peek(requestQueue_in, RequestMsg) {
    if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
        ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) && (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := in_msg.Requestor;
        // out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
        out_msg.Destination.remove(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Invalidate_Control;
      }
    }
  }
}
410
// Queue maintenance: every transition ends by popping the queue it peeked.
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
  requestQueue_in.dequeue();
}

action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
  unblockNetwork_in.dequeue();
}
418
// Absorb dirty writeback data into the directory's copy of the block.
action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
  peek(unblockNetwork_in, ResponseMsg) {
    assert(in_msg.Dirty);
    assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
    getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
    DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
            in_msg.Address, in_msg.DataBlk);
  }
}
428
// Absorb the owner's data (pulled back for a DMA write) into the
// directory's copy; unlike l_writeDataToMemory this makes no Dirty/size
// assertions because the data arrives as a forwarded response.
action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
  peek(unblockNetwork_in, ResponseMsg) {
    getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
    DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
            in_msg.Address, in_msg.DataBlk);
  }
}

action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
  peek(unblockNetwork_in, ResponseMsg) {
    assert(in_msg.Dirty == false);
    assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

    // NOTE: The following check would not be valid in a real
    // implementation.  We include the data in the "dataless"
    // message so we can assert the clean data matches the datablock
    // in memory
    assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
  }
}
449
// Record the unblocking node as a sharer.  ("unlocker" here appears to be
// a historical typo for "unblocker"; the name is kept since transitions
// reference it.)
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
  peek(unblockNetwork_in, ResponseMsg) {
    getDirectoryEntry(address).Sharers.add(in_msg.Sender);
  }
}

// WaitingUnblocks counts outstanding GETS unblocks while in SS/OO.
action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
  getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
}

action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
  getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
  assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
}

action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
  memQueue_in.dequeue();
}
468
// Queue a line fetch to the memory controller.  ReadX/Acks are not used by
// memory; they are echoed back with the data and consumed by d_sendDataMsg.
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_READ;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := in_msg.Requestor;
      out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
      out_msg.MessageSize := in_msg.MessageSize;
      //out_msg.Prefetch := false;
      // These are not used by memory but are passed back here with the read data:
      // a GETS with no sharers is granted exclusively (ReadX).
      out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
      out_msg.Acks := getDirectoryEntry(address).Sharers.count();
      if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
        out_msg.Acks := out_msg.Acks - 1;
      }
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
}
489
// Queue a writeback of data arriving on the unblock/response network.  A
// valid TBE (DMA write in flight) supplies the original requestor id.
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
  peek(unblockNetwork_in, ResponseMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.Sender := machineID;
      if (is_valid(tbe)) {
        out_msg.OriginalRequestorMachId := tbe.Requestor;
      }
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.MessageSize := in_msg.MessageSize;
      //out_msg.Prefetch := false;
      // Not used:
      out_msg.ReadX := false;
      out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
}
509
// Variant of qw_queueMemoryWBRequest that peeks the request queue instead;
// used for DMA writes arriving while the line is in I or S.
action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(memQueue_out, MemoryMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Type := MemoryRequestType:MEMORY_WB;
      out_msg.Sender := machineID;
      out_msg.OriginalRequestorMachId := in_msg.Requestor;
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.MessageSize := in_msg.MessageSize;
      //out_msg.Prefetch := false;
      // Not used:
      out_msg.ReadX := false;
      out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
}
527
528
529 // action(z_stall, "z", desc="Cannot be handled right now.") {
530 // Special name recognized as do nothing case
531 // }
532
// Requeue the head request (re-tried later); used while the line is blocked.
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
  requestQueue_in.recycle();
}
536
// Ack a DMA write handled from the request queue (I/S states); Acks tells
// the DMA controller how many invalidation acks to collect.
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
  peek(requestQueue_in, RequestMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(in_msg.Requestor);
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
      out_msg.Type := CoherenceResponseType:DMA_ACK;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}
551
// Ack a DMA write completed via the OI_D path; the DMA requestor's id was
// stashed in the TBE when the write was accepted.
action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
  peek(unblockNetwork_in, ResponseMsg) {
    enqueue(responseNetwork_out, ResponseMsg, latency="1") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      if (is_valid(tbe)) {
        out_msg.Destination.add(tbe.Requestor);
      }
      out_msg.DataBlk := in_msg.DataBlk;
      out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
      out_msg.Type := CoherenceResponseType:DMA_ACK;
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
}
568
// DMA writes may be sub-line: copyPartial merges only [offset, offset+Len)
// of the payload into the directory's block.
action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
  peek(requestQueue_in, RequestMsg) {
    getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
  }
}

// Same partial merge, but sourced from the TBE (OI_D path, where the DMA
// payload was buffered while the owner's data was fetched back).
action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
  assert(is_valid(tbe));
  getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                 addressOffset(tbe.PhysicalAddress), tbe.Len);
}
580
// Buffer a DMA write (address, length, payload, requestor) while the
// owner's modified data is pulled back ({O,M} -> OI_D).
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
  peek (requestQueue_in, RequestMsg) {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.PhysicalAddress := in_msg.Address;
    tbe.Len := in_msg.Len;
    tbe.DataBlk := in_msg.DataBlk;
    tbe.Requestor := in_msg.Requestor;
  }
}
591
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
  TBEs.deallocate(address);
  unset_tbe();
}
596
597
598
599 // TRANSITIONS
600
// --- Requests arriving in stable states ---

transition(I, GETX, MM) {
  qf_queueMemoryFetchRequest;
  i_popIncomingRequestQueue;
}

transition(I, DMA_READ, XI_M) {
  qf_queueMemoryFetchRequest;
  i_popIncomingRequestQueue;
}

transition(I, DMA_WRITE, XI_U) {
  qw_queueMemoryWBRequest2;
  a_sendDMAAck; // ack count may be zero
  l_writeDMADataToMemory;
  i_popIncomingRequestQueue;
}

transition(XI_M, Memory_Data, I) {
  d_sendDataMsg; // ack count may be zero
  q_popMemQueue;
}

transition(XI_U, Exclusive_Unblock, I) {
  cc_clearSharers;
  c_clearOwner;
  j_popIncomingUnblockQueue;
}

transition(S, GETX, MM) {
  qf_queueMemoryFetchRequest;
  g_sendInvalidations;
  i_popIncomingRequestQueue;
}

// In S memory is up to date, so the DMA read is served from the directory
// copy without blocking the line.
transition(S, DMA_READ, S) {
  //qf_queueMemoryFetchRequest;
  p_fwdDataToDMA;
  //g_sendInvalidations;  // the DMA will collect the invalidations then send an Unblock Exclusive
  i_popIncomingRequestQueue;
}

transition(S, DMA_WRITE, XI_U) {
  qw_queueMemoryWBRequest2;
  a_sendDMAAck; // ack count may be zero
  l_writeDMADataToMemory;
  g_sendInvalidations; // the DMA will collect invalidations
  i_popIncomingRequestQueue;
}

transition(I, GETS, IS) {
  qf_queueMemoryFetchRequest;
  i_popIncomingRequestQueue;
}

transition({S, SS}, GETS, SS) {
  qf_queueMemoryFetchRequest;
  n_incrementOutstanding;
  i_popIncomingRequestQueue;
}

// Writebacks from nodes that are not the recorded owner are nacked.
transition({I, S}, PUTO) {
  b_sendWriteBackNack;
  i_popIncomingRequestQueue;
}

transition({I, S, O}, PUTX) {
  b_sendWriteBackNack;
  i_popIncomingRequestQueue;
}

transition(O, GETX, MM) {
  f_forwardRequest;
  g_sendInvalidations;
  i_popIncomingRequestQueue;
}

transition(O, DMA_READ, O) {
  f_forwardRequest; // this will cause the data to go to DMA directly
  //g_sendInvalidations; // this will cause acks to be sent to the DMA
  i_popIncomingRequestQueue;
}

// DMA write while a cache holds dirty data: buffer the DMA payload in a
// TBE and pull the modified block back from the owner first.
transition({O,M}, DMA_WRITE, OI_D) {
  f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
  g_sendInvalidations; // these go to the DMA Controller
  v_allocateTBE;
  i_popIncomingRequestQueue;
}

transition(OI_D, Data, XI_U) {
  qw_queueMemoryWBRequest;
  a_sendDMAAck2; // ack count may be zero
  p_writeFwdDataToMemory;
  l_writeDMADataToMemoryFromTBE;
  w_deallocateTBE;
  j_popIncomingUnblockQueue;
}

transition({O, OO}, GETS, OO) {
  f_forwardRequest;
  n_incrementOutstanding;
  i_popIncomingRequestQueue;
}

transition(M, GETX, MM) {
  f_forwardRequest;
  i_popIncomingRequestQueue;
}

// no exclusive unblock will show up to the directory
transition(M, DMA_READ, M) {
  f_forwardRequest; // this will cause the data to go to DMA directly
  i_popIncomingRequestQueue;
}

transition(M, GETS, MO) {
  f_forwardRequest;
  i_popIncomingRequestQueue;
}

transition(M, PUTX, MI) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

// happens if M->O transition happens on-chip
transition(M, PUTO, MI) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

transition(M, PUTO_SHARERS, MIS) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

transition(O, PUTO, OS) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

transition(O, PUTO_SHARERS, OSS) {
  a_sendWriteBackAck;
  i_popIncomingRequestQueue;
}

// --- Requests hitting a blocked line are recycled until it unblocks ---

transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
  zz_recycleRequest;
}

transition({MM, MO}, Exclusive_Unblock, M) {
  cc_clearSharers;
  e_ownerIsUnblocker;
  j_popIncomingUnblockQueue;
}

transition(MO, Unblock, O) {
  m_addUnlockerToSharers;
  j_popIncomingUnblockQueue;
}

transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
  zz_recycleRequest;
}

transition(IS, GETS) {
  zz_recycleRequest;
}

transition(IS, Unblock, S) {
  m_addUnlockerToSharers;
  j_popIncomingUnblockQueue;
}

transition(IS, Exclusive_Unblock, M) {
  cc_clearSharers;
  e_ownerIsUnblocker;
  j_popIncomingUnblockQueue;
}

// SS/OO may have several GETS outstanding; stay blocked until the last
// unblock (Last_Unblock fires when WaitingUnblocks reaches one).
transition(SS, Unblock) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

transition(SS, Last_Unblock, S) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

transition(OO, Unblock) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

transition(OO, Last_Unblock, O) {
  m_addUnlockerToSharers;
  o_decrementOutstanding;
  j_popIncomingUnblockQueue;
}

// --- Writeback completion ---

transition(MI, Dirty_Writeback, I) {
  c_clearOwner;
  cc_clearSharers;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(MIS, Dirty_Writeback, S) {
  c_moveOwnerToSharer;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(MIS, Clean_Writeback, S) {
  c_moveOwnerToSharer;
  j_popIncomingUnblockQueue;
}

transition(OS, Dirty_Writeback, S) {
  c_clearOwner;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(OSS, Dirty_Writeback, S) {
  c_moveOwnerToSharer;
  l_writeDataToMemory;
  qw_queueMemoryWBRequest;
  j_popIncomingUnblockQueue;
}

transition(OSS, Clean_Writeback, S) {
  c_moveOwnerToSharer;
  j_popIncomingUnblockQueue;
}

transition(MI, Clean_Writeback, I) {
  c_clearOwner;
  cc_clearSharers;
  ll_checkDataInMemory;
  j_popIncomingUnblockQueue;
}

transition(OS, Clean_Writeback, S) {
  c_clearOwner;
  ll_checkDataInMemory;
  j_popIncomingUnblockQueue;
}

// NOTE(review): an Unblock while waiting on a writeback appears to cover a
// race with a forwarded request; the unblock is simply consumed -- confirm.
transition({MI, MIS}, Unblock, M) {
  j_popIncomingUnblockQueue;
}

transition({OS, OSS}, Unblock, O) {
  j_popIncomingUnblockQueue;
}

// --- Memory controller responses (accepted in nearly every state) ---

transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
  d_sendDataMsg;
  q_popMemQueue;
}

transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
  //a_sendAck;
  q_popMemQueue;
}
875
876 }