/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "Directory protocol")
: DirectoryMemory * directory;
  Cycles directory_latency := 6;
  Cycles to_memory_controller_latency := 1;

  // Message Queues
  MessageBuffer * requestToDir, network="From", virtual_network="1",
        vnet_type="request";  // a mod-L2 bank -> this Dir
  MessageBuffer * responseToDir, network="From", virtual_network="2",
        vnet_type="response";  // a mod-L2 bank -> this Dir

  MessageBuffer * forwardFromDir, network="To", virtual_network="1",
        vnet_type="forward";
  MessageBuffer * responseFromDir, network="To", virtual_network="2",
        vnet_type="response";  // Dir -> mod-L2 bank

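  // Responses from the attached memory controller; unlike the buffers above,
  // this one has no network attributes and is not routed over the on-chip
  // network.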
  MessageBuffer * responseFromMemory;
{
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Maybe_Stale, desc="Owner";
    M, AccessPermission:Maybe_Stale, desc="Modified";

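    // Transient states: the directory is blocked waiting for unblocks,
    // writeback data, or memory responses.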
    IS, AccessPermission:Busy, desc="Blocked, was in idle";
    SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
    OO, AccessPermission:Busy, desc="Blocked, was in owned";
    MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
    MM, AccessPermission:Busy, desc="Blocked, going to modified";
    MM_DMA, AccessPermission:Busy, desc="Blocked, going to I";

    MI, AccessPermission:Busy, desc="Blocked on a writeback";
    MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, AccessPermission:Busy, desc="Blocked on a writeback";
    OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";

    XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";

    OD, AccessPermission:Busy, desc="In O, waiting for DMA ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for DMA ack from L2";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    DMA_READ, desc="DMA Read";
    DMA_WRITE, desc="DMA Write";
    DMA_ACK, desc="DMA Ack";
    Data, desc="Data to directory";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface='AbstractEntry') {
    State DirectoryState, desc="Directory state";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  structure(TBE, desc="...") {
    Addr PhysicalAddress, desc="Physical address for this entry";
    int Len, desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  structure(TBETable, external = "yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();

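  // Look up the directory entry for addr, lazily allocating a fresh entry on
  // first touch.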
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Addr addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }

  void setState(TBE tbe, Addr addr, State state) {
    if (directory.isPresent(addr)) {

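      // Sanity-check the sharer/owner invariants implied by the target state.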
      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
      }

      if (state == State:O) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
      }

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
      }

      if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) {
        getDirectoryEntry(addr).DirectoryState := state;
        // disable coherence checker
        // sequencer.checkCoherence(addr);
      }
      else {
        getDirectoryEntry(addr).DirectoryState := state;
      }
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    functionalMemoryRead(pkt);
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;
    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  // if no sharers, then directory can be considered
  // both a sharer and exclusive w.r.t. coherence checking
  bool isBlockShared(Addr addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Addr addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);

  // ** IN_PORTS **

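  // responseToDir carries unblocks, writeback data/acks, exclusive data, and
  // DMA acks from the L2 banks; dispatch on the response type.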
  in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
    if (unblockNetwork_in.isReady(clockEdge())) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            trigger(Event:Unblock, in_msg.addr,
                    TBEs[in_msg.addr]);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_ACK, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady(clockEdge())) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
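          // DMA requests need not be line-aligned; normalize to the
          // containing line address before triggering.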
          trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }

  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(p_fwdDataToDMA, "\d", desc="Forward data to the DMA requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Dirty := false; // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
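        // Acks the requestor must collect: one per sharer, excluding the
        // requestor itself.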
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner, with the directory as requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) &&
           (getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
          // out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue(clockEdge());
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue(clockEdge());
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }

  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }

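  // On a DMA-triggered writeback the TBE records the original requestor;
  // otherwise credit the sender of the writeback data.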
  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      if (is_valid(tbe)) {
        queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
                         in_msg.DataBlk);
      } else {
        queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
                         in_msg.DataBlk);
      }
    }
  }

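  // Merge the (possibly partial-line) DMA write data held in the TBE into the
  // block returned by the owner, then write the merged block back to memory.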
  action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
         desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      DataBlock DataBlk := in_msg.DataBlk;
      DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress),
                          tbe.Len);
      queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
                       DataBlk);
    }
  }

  action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }

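  // Re-schedule a request we cannot service yet; it is retried after
  // recycle_latency.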
  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        if (is_valid(tbe)) {
          out_msg.Destination.add(tbe.Requestor);
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := in_msg.addr;
      tbe.Len := in_msg.Len;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }


  // TRANSITIONS
  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

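  // DMA write to an idle line: write straight to memory and ack; XI_U then
  // waits for the DMA controller's exclusive unblock before returning to I.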
  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck;  // ack count may be zero
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg;  // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }

  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations;  // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBRequest2;
    a_sendDMAAck;  // ack count may be zero
    g_sendInvalidations;  // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, OD) {
    f_forwardRequest;  // this will cause the data to go to DMA directly
    //g_sendInvalidations;  // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition(OD, DMA_ACK, O) {
    j_popIncomingUnblockQueue;
  }

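  // DMA write while a cache owns the line: fetch the dirty copy from the
  // owner (the directory poses as requestor), merge it with the DMA data held
  // in the TBE, then write back and ack.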
  transition({O,M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor;  // need the modified data before we can proceed
    g_sendInvalidations;  // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequestFromMessageAndTBE;
    a_sendDMAAck2;  // ack count may be zero
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no exclusive unblock will show up to the directory
  transition(M, DMA_READ, MD) {
    f_forwardRequest;  // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(MD, DMA_ACK, M) {
    j_popIncomingUnblockQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if M->O transition happens on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }


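  // While a transient state is pending, stall any new requests by recycling
  // them on the request queue.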
  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    qw_queueMemoryWBRequest;
    j_popIncomingUnblockQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    j_popIncomingUnblockQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }

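  // Memory responses can arrive in nearly any state; forward the data to the
  // original requestor recorded by the memory controller.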
  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }

}