/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */

machine(Directory, "Directory protocol")
  : DirectoryMemory * directory,
    MemoryControl * memBuffer,
    int directory_latency = 6
{

  // ** IN QUEUES **
  MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir

  MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank

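  // Note: foo1 and goo1 appear to be unused placeholders that keep the
  // virtual-network numbering aligned with the cache controllers: foo1's
  // in_port below is empty and nothing ever enqueues to goo1. Real traffic
  // uses virtual network 1 (requests and forwards) and virtual network 2
  // (responses and unblocks).
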
  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    S, desc="Shared";
    O, desc="Owner";
    M, desc="Modified";

    IS, desc="Blocked, was in idle";
    SS, desc="Blocked, was in shared";
    OO, desc="Blocked, was in owned";
    MO, desc="Blocked, going to owner or maybe modified";
    MM, desc="Blocked, going to modified";
    MM_DMA, desc="Blocked, going to I";

    MI, desc="Blocked on a writeback";
    MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, desc="Blocked on a writeback";
    OSS, desc="Blocked on a writeback, but don't remove from sharers when received";

    XI_M, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, desc="In O, going to I, waiting for data";
  }
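
  // Transient-state naming: the first letter is (roughly) the state being
  // left and the second the state being entered, so MO means "blocked,
  // leaving M toward O". The trailing S in MIS/OSS marks the writeback
  // variants that keep the writer on the sharers list.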

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    DMA_READ, desc="DMA Read";
    DMA_WRITE, desc="DMA Write";
    Data, desc="Data to directory";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface='AbstractEntry') {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  structure(TBE, desc="...") {
    Address PhysicalAddress, desc="Physical address for this entry";
    int Len, desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
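
  // The TBE table is used only on the DMA-write path: a DMA_WRITE that
  // finds a dirty owner (state O or M) parks the requestor, sub-line
  // address, length, and write data here until the owner's copy of the
  // block arrives (transient state OI_D below).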

  // ** OBJECTS **
  TBETable TBEs, template_hack="<Directory_TBE>";

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(Address addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }

  void setState(Address addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
      }

      if (state == State:O) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
      }

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
      }

      if ((getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I)) {
        getDirectoryEntry(addr).DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      }
      else {
        getDirectoryEntry(addr).DirectoryState := state;
      }
    }
  }
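
  // The asserts above encode the directory invariants: I has no owner and
  // no sharers, S has no owner, O has exactly one owner who is not also a
  // sharer, M has exactly one owner and no sharers, and the unblock count
  // may only be non-zero in SS/OO.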

  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
  bool isBlockShared(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
  out_port(goo1_out, ResponseMsg, goo1);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(foo1_in, ResponseMsg, foo1) {

  }

  // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
  //   if (unblockNetwork_in.isReady()) {
  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.Address);
          } else {
            trigger(Event:Unblock, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }
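
  // WaitingUnblocks counts the GETS requests forwarded while blocked in
  // SS/OO; an UNBLOCK that finds the count at one is the last outstanding
  // one and fires Last_Unblock so the directory can leave the blocked state.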

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address));
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address));
        } else {
          error("Invalid message");
        }
      }
    }
  }
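
  // DMA addresses need not be cache-line aligned, so DMA events trigger on
  // the enclosing line address; the exact address and length travel in the
  // message (and, for writes that hit a dirty owner, in the TBE).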

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }

  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        //out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
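
  // ReadX and Acks are set when the fetch is queued (see
  // qf_queueMemoryFetchRequest): the memory controller ignores them and
  // echoes them back with the data, so the response type (DATA_EXCLUSIVE
  // for a GETS that found no sharers) and the ack count can be chosen here
  // without re-reading the directory entry.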

  action(p_fwdDataToDMA, "\d", desc="Send data to DMA requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner, with the directory as requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requestor") {
    peek(requestQueue_in, RequestMsg) {
      if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) && (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          // out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }
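
  // The invalidation is sent only when some sharer other than the
  // requestor exists: either more than one sharer, or a single sharer that
  // is not the requestor itself. The Acks counts attached to forwarded
  // requests above likewise exclude the requestor from its own ack count.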

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unblocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }

  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // These are not used by memory but are passed back here with the read data:
        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        if (TBEs.isPresent(address)) {
          out_msg.OriginalRequestorMachId := TBEs[address].Requestor;
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }
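
  // The two writeback actions differ only in where the data comes from:
  // qw_queueMemoryWBRequest peeks the response network (cache writebacks
  // and owner data forwarded for a DMA write), while qw_queueMemoryWBRequest2
  // peeks the request queue and serves DMA writes that hit in state I or S.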

  // action(z_stall, "z", desc="Cannot be handled right now.") {
  //   Special name recognized as do nothing case
  // }

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle();
  }
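
  // Unlike a plain stall, recycle() moves the message to the tail of its
  // queue and retries it after a recycle delay; the blocked transient
  // states below use this to defer new requests until the outstanding
  // unblock or memory response drains.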

  action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        if (TBEs.isPresent(address)) {
          out_msg.Destination.add(TBEs[address].Requestor);
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }
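
  // copyPartial overwrites only Len bytes of the block starting at the
  // sub-line offset, leaving the rest of the memory copy intact.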

  action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk,
                                                   addressOffset(TBEs[address].PhysicalAddress),
                                                   TBEs[address].Len);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      TBEs[address].PhysicalAddress := in_msg.Address;
      TBEs[address].Len := in_msg.Len;
      TBEs[address].DataBlk := in_msg.DataBlk;
      TBEs[address].Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
  }

  // TRANSITIONS

  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck; // ack count may be zero
    l_writeDMADataToMemory;
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg; // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }
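
  // For a DMA write, the directory acks immediately and moves to XI_U; the
  // DMA controller collects any invalidation acks itself and sends a
  // single Exclusive_Unblock when it is done, returning the line to I.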

  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ, S) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck; // ack count may be zero
    l_writeDMADataToMemory;
    g_sendInvalidations; // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, O) {
    f_forwardRequest; // this will cause the data to go to DMA directly
    //g_sendInvalidations; // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition({O, M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
    g_sendInvalidations; // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequest;
    a_sendDMAAck2; // ack count may be zero
    p_writeFwdDataToMemory;
    l_writeDMADataToMemoryFromTBE;
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }
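
  // On OI_D's Data event the owner's dirty block arrives (forwarded
  // because the directory named itself the requestor): the writeback is
  // queued to the memory controller, the block is written into the
  // directory's copy, and the buffered partial DMA write from the TBE is
  // overlaid on top of it.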

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no exclusive unblock will show up to the directory
  transition(M, DMA_READ, M) {
    f_forwardRequest; // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }
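
  // An Unblock arriving while blocked on a writeback (MI/MIS, OS/OSS)
  // presumably means the writeback raced with a previously forwarded
  // request and was superseded; the directory simply falls back to M or O.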

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }

}