/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 */

machine(Directory, "Directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   Cycles directory_latency = 6
{

  // ** IN QUEUES **
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false", vnet_type="request"; // a mod-L2 bank -> this Dir
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false", vnet_type="response"; // a mod-L2 bank -> this Dir

  MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false", vnet_type="forward";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false", vnet_type="response"; // Dir -> mod-L2 bank

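  // Requests arrive on virtual network 1 and responses/unblocks on
  // virtual network 2; forwards and responses leave on the same pair.
  // Keeping request and response traffic on separate virtual networks is
  // the usual Ruby idiom for avoiding protocol deadlock in the
  // interconnect.
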
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Maybe_Stale, desc="Owner";
    M, AccessPermission:Maybe_Stale, desc="Modified";

    IS, AccessPermission:Busy, desc="Blocked, was in idle";
    SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
    OO, AccessPermission:Busy, desc="Blocked, was in owned";
    MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
    MM, AccessPermission:Busy, desc="Blocked, going to modified";
    MM_DMA, AccessPermission:Busy, desc="Blocked, going to I";

    MI, AccessPermission:Busy, desc="Blocked on a writeback";
    MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, AccessPermission:Busy, desc="Blocked on a writeback";
    OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";

    XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";

    OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
  }
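
  // Naming convention for the transient states (a sketch, inferred from
  // the desc strings above): the first letter is the stable state being
  // left, the second the one being entered. IS/SS/OO/MO/MM wait for the
  // requester's unblock, MI/MIS/OS/OSS wait for writeback data or a clean
  // ack, the XI_* states drain the line to I on behalf of DMA, and OD/MD
  // wait for a DMA ack.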

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives; we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    DMA_READ, desc="DMA Read";
    DMA_WRITE, desc="DMA Write";
    DMA_ACK, desc="DMA Ack";
    Data, desc="Data to directory";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface='AbstractEntry') {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  structure(TBE, desc="...") {
    Address PhysicalAddress, desc="Physical address for this entry";
    int Len, desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  structure(TBETable, external = "yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }
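
  // Directory entries are allocated lazily: looking up an address that
  // has no entry yet allocates a fresh Entry, which starts in the
  // declared default state (I) with empty Sharers and Owner sets.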

  State getState(TBE tbe, Address addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }

  void setState(TBE tbe, Address addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
      }

      if (state == State:O) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
      }

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
      }

      if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
        getDirectoryEntry(addr).DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      }
      else {
        getDirectoryEntry(addr).DirectoryState := state;
      }
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
  bool isBlockShared(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Address addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }


  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady()) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (getDirectoryEntry(in_msg.Address).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.Address,
                    TBEs[in_msg.Address]);
          } else {
            trigger(Event:Unblock, in_msg.Address,
                    TBEs[in_msg.Address]);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_ACK, in_msg.Address,
                  TBEs[in_msg.Address]);
        } else {
          error("Invalid message");
        }
      }
    }
  }
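
  // WaitingUnblocks is a simple counter: each forwarded GETS bumps it
  // (n_incrementOutstanding) and each UNBLOCK drains it
  // (o_decrementOutstanding). When the counter reads 1, the unblock being
  // peeked is the last outstanding one, so Last_Unblock fires and the
  // line can leave SS/OO for its stable state. For example, two
  // overlapping GETS in S leave WaitingUnblocks == 2; the first UNBLOCK
  // keeps the line in SS, the second returns it to S.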

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // an off-chip memory request/response has completed
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }

  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        //out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(p_fwdDataToDMA, "\d", desc="Forward data to the DMA requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }


  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner, with the directory as requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      if ((getDirectoryEntry(in_msg.Address).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.Address).Sharers.count() > 0) && (getDirectoryEntry(in_msg.Address).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
          // out_msg.Destination := getDirectoryEntry(in_msg.Address).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.Address).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }
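
  // The guard above only enqueues an INV when a sharer other than the
  // requestor actually exists. For example, with sharers {A, B} and A
  // requesting GETX, a single multicast INV goes to {B}; if A is the sole
  // sharer, nothing is sent.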

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(p_writeFwdDataToMemory, "p", desc="Write Response data to memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
    peek(unblockNetwork_in, ResponseMsg) {
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      DPRINTF(RubySlicc, "Address: %s, MsgDataBlock: %s MemoryDataBlock: %s\n",
              in_msg.Address, in_msg.DataBlk,
              getDirectoryEntry(in_msg.Address).DataBlk);
      assert(getDirectoryEntry(in_msg.Address).DataBlk == in_msg.DataBlk);
    }
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unblocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }

  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // These are not used by memory but are passed back here with the read data:
        out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && getDirectoryEntry(address).Sharers.count() == 0);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }
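
  // ReadX and Acks simply ride along with the memory request and return
  // unchanged with Memory_Data, where d_sendDataMsg consumes them: ReadX
  // is true for a GETS that found no sharers, letting the requester take
  // the line exclusively (DATA_EXCLUSIVE) instead of shared, and Acks
  // tells the requester how many invalidation acks to expect.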

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        if (is_valid(tbe)) {
          out_msg.OriginalRequestorMachId := tbe.Requestor;
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := false;
        // Not used:
        out_msg.ReadX := false;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }


  // action(z_stall, "z", desc="Cannot be handled right now.") {
  //   Special name recognized as do nothing case
  // }

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle();
  }
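
  // recycle() takes the message at the head and re-enqueues it behind a
  // recycle latency instead of leaving it parked at the head the way a
  // plain z_stall would, so requests to other addresses queued behind it
  // can make progress while this line sits in a transient state.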

  action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        if (is_valid(tbe)) {
          out_msg.Destination.add(tbe.Requestor);
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                   addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }


  // TRANSITIONS

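  // Worked example of a full three-leg transaction: a GETX to an Invalid
  // line takes I -> MM and queues a memory fetch
  // (qf_queueMemoryFetchRequest); when Memory_Data returns, d_sendDataMsg
  // ships the block to the requester; the requester's UNBLOCK_EXCLUSIVE
  // then fires Exclusive_Unblock, which records it as the new owner
  // (e_ownerIsUnblocker) and settles the line in M.
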
  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck; // ack count may be zero
    l_writeDMADataToMemory;
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg; // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }

  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck; // ack count may be zero
    l_writeDMADataToMemory;
    g_sendInvalidations; // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, OD) {
    f_forwardRequest; // this will cause the data to go to DMA directly
    //g_sendInvalidations; // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition(OD, DMA_ACK, O) {
    j_popIncomingUnblockQueue;
  }

  transition({O,M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
    g_sendInvalidations; // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequest;
    a_sendDMAAck2; // ack count may be zero
    p_writeFwdDataToMemory;
    l_writeDMADataToMemoryFromTBE;
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }
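
  // DMA write to an owned/modified line, step by step: the directory
  // names itself requestor so the owner's DATA_EXCLUSIVE comes back here
  // (the Data event), stashing the partial DMA payload in a TBE
  // meanwhile. On Data it writes the owner's full block to memory,
  // overlays the DMA bytes from the TBE on top, queues the memory
  // writeback, acks the DMA, and waits in XI_U for the DMA's exclusive
  // unblock.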

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no exclusive unblock will show up to the directory
  transition(M, DMA_READ, MD) {
    f_forwardRequest; // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(MD, DMA_ACK, M) {
    j_popIncomingUnblockQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }


  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    l_writeDataToMemory;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    ll_checkDataInMemory;
    j_popIncomingUnblockQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }

}