MOESI_hammer: Added full-bit directory support
[gem5.git] / src / mem / protocol / MOESI_CMP_directory-perfectDir.sm
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_CMP_directory-dir.sm 1.11 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
 */

// Directory controller for the MOESI_CMP_directory protocol ("perfect
// directory" variant: the directory tracks the full sharer/owner sets).
//
// The directory serializes requests per block by moving a block into a
// "blocked" state (IS/SS/OO/MO/MM/MI/MIS/OS/OSS) while a request or
// writeback is in flight, and recycling any request that arrives for a
// blocked block until the matching Unblock / Writeback message drains.
machine(Directory, "Directory protocol") {

  // ** IN QUEUES **
  // NOTE(review): foo1/goo1 appear to be placeholders that occupy virtual
  // network 0 so the request/response networks line up with the cache
  // controllers' numbering — foo1's in_port below is intentionally empty.
  MessageBuffer foo1, network="From", virtual_network="0", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";  // a mod-L2 bank -> this Dir
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";  // a mod-L2 bank -> this Dir

  MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";  // Dir -> mod-L2 bank


  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    S, desc="Shared";
    O, desc="Owner";
    M, desc="Modified";

    // Transient states: blocked waiting for Unblock/Exclusive_Unblock
    IS, desc="Blocked, was in idle";
    SS, desc="Blocked, was in shared";
    OO, desc="Blocked, was in owned";
    MO, desc="Blocked, going to owner or maybe modified";
    MM, desc="Blocked, going to modified";

    // Transient states: blocked waiting for writeback data/ack
    MI, desc="Blocked on a writeback";
    MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, desc="Blocked on a writeback";
    OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
  }

  // TYPES

  // DirectoryEntry: per-block directory state (full-map sharer tracking)
  structure(Entry, desc="...") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  external_type(DirectoryMemory) {
    Entry lookup(Address);
    bool isPresent(Address);
  }


  // ** OBJECTS **

  DirectoryMemory directory, constructor_hack="i";

  State getState(Address addr) {
    return directory[addr].DirectoryState;
  }

  // Sets the state after sanity-checking the sharer/owner invariants each
  // stable state implies (e.g. M has exactly one owner and no sharers).
  void setState(Address addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(directory[addr].Owner.count() == 0);
        assert(directory[addr].Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(directory[addr].Owner.count() == 0);
      }

      if (state == State:O) {
        assert(directory[addr].Owner.count() == 1);
        // the owner must not also be on the sharer list
        assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
      }

      if (state == State:M) {
        assert(directory[addr].Owner.count() == 1);
        assert(directory[addr].Sharers.count() == 0);
      }

      // outstanding-unblock counting is only used by the SS/OO states
      if ((state != State:SS) && (state != State:OO)) {
        assert(directory[addr].WaitingUnblocks == 0);
      }

      if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
        directory[addr].DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      }
      else {
        directory[addr].DirectoryState := state;
      }
    }
  }

  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
  bool isBlockShared(Address addr) {
    if (directory.isPresent(addr)) {
      if (directory[addr].DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Address addr) {
    if (directory.isPresent(addr)) {
      if (directory[addr].DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }


  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
  out_port(goo1_out, ResponseMsg, goo1);

  // ** IN_PORTS **

  // placeholder port for virtual network 0 — never receives anything
  in_port(foo1_in, ResponseMsg, foo1) {

  }

  // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
  //  if (unblockNetwork_in.isReady()) {
  // Unblocks and writeback data/acks arrive on the response network.
  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          // distinguish the final unblock so SS/OO can return to a stable state
          if (directory[in_msg.Address].WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.Address);
          } else {
            trigger(Event:Unblock, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    directory[address].Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    directory[address].Sharers.addNetDest(directory[address].Owner);
    directory[address].Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    directory[address].Sharers.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
      // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
        out_msg.Address := address;

        // a GETS with no current sharers may be granted exclusive data
        if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }

        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        // requestor must collect one inv-ack per sharer; don't count the
        // requestor itself if it is already on the sharer list
        out_msg.Acks := directory[address].Sharers.count();
        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      directory[address].Owner.clear();
      directory[address].Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
        // same ack-count rule as d_sendData: one ack per sharer, excluding
        // the requestor if it is itself a sharer
        out_msg.Acks := directory[address].Sharers.count();
        if (directory[address].Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      // only send if at least one sharer other than the requestor exists
      if ((directory[in_msg.Address].Sharers.count() > 1) ||
          ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_CACHE_LATENCY") {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          // out_msg.Destination := directory[in_msg.Address].Sharers;
          out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation.  We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
    }
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      directory[address].Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
    assert(directory[address].WaitingUnblocks >= 0);
  }

  // action(z_stall, "z", desc="Cannot be handled right now.") {
    // Special name recognized as do nothing case
  // }

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle();
  }

  // TRANSITIONS

  transition(I, GETX, MM) {
    d_sendData;
    i_popIncomingRequestQueue;
  }

  transition(S, GETX, MM) {
    d_sendData;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    d_sendData;
    i_popIncomingRequestQueue;
  }

  // multiple concurrent GETS are allowed; count the unblocks we expect
  transition({S, SS}, GETS, SS) {
    d_sendData;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  // stale writebacks (block already downgraded/invalidated) are nacked
  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }


  // while blocked on a writeback, stall all new requests for the block
  transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  // writeback raced with a forwarded request: the cache kept the line
  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }
}