Ruby: Remove CacheMsg class from SLICC
src/mem/protocol/MOESI_CMP_directory-dir.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */

machine(Directory, "Directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int directory_latency = 6
{

  // ** IN QUEUES **
  MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir

  MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank
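
  // Virtual network 0 (foo1/goo1) is a placeholder in this machine: foo1 is
  // read by an empty in_port and nothing is ever enqueued on goo1. Requests
  // travel on virtual network 1 and responses/unblocks on virtual network 2,
  // in both directions.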

  // STATES
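  // Transient-state naming (roughly): the first letter is the base state when
  // the transaction began and the second the expected final state (e.g., MO is
  // a modified line servicing a GETS, headed for O). The MIS/OSS variants
  // handle PUTO_SHARERS writebacks, where the evicting owner stays in Sharers.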
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owner";
    M, AccessPermission:Read_Write, desc="Modified";

    IS, AccessPermission:Busy, desc="Blocked, was in idle";
    SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
    OO, AccessPermission:Read_Only, desc="Blocked, was in owned";
    MO, AccessPermission:Read_Only, desc="Blocked, going to owner or maybe modified";
    MM, AccessPermission:Read_Only, desc="Blocked, going to modified";
    MM_DMA, AccessPermission:Busy, desc="Blocked, going to I";

    MI, AccessPermission:Busy, desc="Blocked on a writeback";
    MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, AccessPermission:Busy, desc="Blocked on a writeback";
    OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";

    XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";

    OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    DMA_READ, desc="DMA Read";
    DMA_WRITE, desc="DMA Write";
    DMA_ACK, desc="DMA Ack";
    Data, desc="Data to directory";
  }
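
  // Events are triggered from three sources: coherence requests arriving on
  // requestToDir, coherence responses (unblocks, writebacks, data, DMA acks)
  // arriving on responseToDir, and memory read/writeback completions arriving
  // from the memory controller on memBuffer.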

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface='AbstractEntry') {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  structure(TBE, desc="...") {
    Address PhysicalAddress, desc="Physical address for this entry";
    int Len, desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  structure(TBETable, external = "yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
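
  // A TBE is allocated only on the DMA-write path: v_allocateTBE buffers the
  // partial-line DMA data and the requestor while the directory fetches the
  // dirty block from the owner (see the {O, M} DMA_WRITE transition below).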

  // ** OBJECTS **
  TBETable TBEs, template_hack="<Directory_TBE>";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(TBE tbe, Address addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }

  void setState(TBE tbe, Address addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
      }

      if (state == State:O) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
      }

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
      }

      if ((getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I)) {
        getDirectoryEntry(addr).DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      }
      else {
        getDirectoryEntry(addr).DirectoryState := state;
      }
    }
  }

  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
  bool isBlockShared(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
  out_port(goo1_out, ResponseMsg, goo1);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(foo1_in, ResponseMsg, foo1) {

  }

  // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
  //   if (unblockNetwork_in.isReady()) {
  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            trigger(Event:Unblock, in_msg.Address,
                    TBEs[in_msg.Address]);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_ACK, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }
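
  // WaitingUnblocks counts outstanding unblocks for overlapping GETS
  // requests; the port raises Last_Unblock for the final one so SS/OO can
  // collapse back to S/O.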

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }

  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        //out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(p_fwdDataToDMA, "\d", desc="Forward data to DMA requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }
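
  // Acks tells the requestor how many invalidation acks to expect: one per
  // sharer, minus one if the requestor is itself on the sharer list, since
  // g_sendInvalidations never sends an INV back to the requestor.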

  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner, with the directory as requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) &&
           (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
          // out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unblocker to the sharers list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }

  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // These are not used by memory but are passed back here with the read data:
        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }
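
  // ReadX and Acks ride along with the fetch and return unchanged on the
  // MemoryMsg, letting d_sendDataMsg choose between DATA and DATA_EXCLUSIVE;
  // a GETS that finds no sharers returns the block exclusively.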

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        if (is_valid(tbe)) {
          out_msg.OriginalRequestorMachId := tbe.Requestor;
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  // action(z_stall, "z", desc="Cannot be handled right now.") {
  //   Special name recognized as do nothing case
  // }

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle();
  }

  action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        if (is_valid(tbe)) {
          out_msg.Destination.add(tbe.Requestor);
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                   addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // TRANSITIONS
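
  // Each transition names the current state, the triggering event, and
  // (optionally) the next state; the body is the ordered list of actions
  // executed atomically when the transition fires.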

  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck; // ack count may be zero
    l_writeDMADataToMemory;
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg; // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }

  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck; // ack count may be zero
    l_writeDMADataToMemory;
    g_sendInvalidations; // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, OD) {
    f_forwardRequest; // this will cause the data to go to DMA directly
    //g_sendInvalidations; // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition(OD, DMA_ACK, O) {
    j_popIncomingUnblockQueue;
  }
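
  // DMA writes to a block with an on-chip owner take two steps: forward the
  // request to the owner with the directory posing as requestor and buffer
  // the partial DMA data in a TBE, then, when the owner's data arrives,
  // write it back, overlay the buffered DMA data on the directory copy, and
  // ack the DMA engine before waiting in XI_U for its final unblock.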

  transition({O, M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
    g_sendInvalidations; // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequest;
    a_sendDMAAck2; // ack count may be zero
    p_writeFwdDataToMemory;
    l_writeDMADataToMemoryFromTBE;
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no exclusive unblock will show up to the directory
  transition(M, DMA_READ, MD) {
    f_forwardRequest; // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(MD, DMA_ACK, M) {
    j_popIncomingUnblockQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }
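
  // Requests that find the block in a transient state are recycled:
  // recycle() re-queues the message to be retried later instead of blocking
  // the head of the request queue.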

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }

}