mem-ruby: Replace SLICC queueMemory calls with enqueue

src/mem/ruby/protocol/MOESI_CMP_directory-dir.sm
/*
 * Copyright (c) 2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:Directory, "Directory protocol")
: DirectoryMemory * directory;
  Cycles directory_latency := 6;
  Cycles to_memory_controller_latency := 1;

  // Message Queues
  MessageBuffer * requestToDir, network="From", virtual_network="1",
        vnet_type="request";  // a mod-L2 bank -> this Dir
  MessageBuffer * responseToDir, network="From", virtual_network="2",
        vnet_type="response";  // a mod-L2 bank -> this Dir

  MessageBuffer * forwardFromDir, network="To", virtual_network="1",
        vnet_type="forward";
  MessageBuffer * responseFromDir, network="To", virtual_network="2",
        vnet_type="response";  // Dir -> mod-L2 bank

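  // Memory traffic now flows through these buffers: the controller
  // enqueues MemoryMsgs on requestToMemory and receives replies on
  // responseFromMemory, replacing the older SLICC
  // queueMemoryRead/queueMemoryWrite helper calls.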
  MessageBuffer * requestToMemory;
  MessageBuffer * responseFromMemory;
{
  // STATES
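  // Stable MOESI states plus transient (mostly Busy) states used while
  // the directory waits on memory responses, unblock messages, or DMA acks.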
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="Invalid";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Maybe_Stale, desc="Owner";
    M, AccessPermission:Maybe_Stale, desc="Modified";

    IS, AccessPermission:Busy, desc="Blocked, was in idle";
    SS, AccessPermission:Read_Only, desc="Blocked, was in shared";
    OO, AccessPermission:Busy, desc="Blocked, was in owned";
    MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified";
    MM, AccessPermission:Busy, desc="Blocked, going to modified";

    MI, AccessPermission:Busy, desc="Blocked on a writeback";
    MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";
    OS, AccessPermission:Busy, desc="Blocked on a writeback";
    OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received";

    XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller";
    XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock";
    OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data";

    OD, AccessPermission:Busy, desc="In O, waiting for dma ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for dma ack from L2";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTO, desc="A PUTO arrives";
    PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
    Unblock, desc="An unblock message arrives";
    Last_Unblock, desc="An unblock message arrives, and we're not waiting for any additional unblocks";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
    Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    DMA_READ, desc="DMA Read";
    DMA_WRITE, desc="DMA Write";
    DMA_ACK, desc="DMA Ack";
    Data, desc="Data to directory";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface='AbstractCacheEntry', main="false") {
    State DirectoryState, desc="Directory state";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
    int WaitingUnblocks, desc="Number of acks we're waiting for";
  }

  structure(TBE, desc="...") {
    Addr PhysicalAddress, desc="Physical address for this entry";
    int Len, desc="Length of request";
    DataBlock DataBlk, desc="DataBlk";
    MachineID Requestor, desc="original requestor";
  }

  structure(TBETable, external = "yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  void set_tbe(TBE b);
  void unset_tbe();

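  // Directory entries are allocated lazily: the first access to an
  // address creates its Entry in DirectoryMemory.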
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Addr addr) {
    return getDirectoryEntry(addr).DirectoryState;
  }

  void setState(TBE tbe, Addr addr, State state) {
    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if (state == State:S) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
      }

      if (state == State:O) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false);
      }

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      if ((state != State:SS) && (state != State:OO)) {
        assert(getDirectoryEntry(addr).WaitingUnblocks == 0);
      }

      // The coherence-checker hook for valid -> I transitions is disabled:
      // sequencer.checkCoherence(addr);
      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n",
              Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Addr addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    functionalMemoryRead(pkt);
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;
    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  // When there are no sharers, the directory itself counts as both a
  // sharer and the exclusive owner for coherence-checking purposes, so
  // both predicates below return true in state I.
  bool isBlockShared(Addr addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  bool isBlockExclusive(Addr addr) {
    if (directory.isPresent(addr)) {
      if (getDirectoryEntry(addr).DirectoryState == State:I) {
        return true;
      }
    }
    return false;
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, requestToMemory);

  // ** IN_PORTS **

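  // Note: the rank arguments below set the relative polling order of
  // these in_ports in the generated controller code.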
  in_port(unblockNetwork_in, ResponseMsg, responseToDir, rank=2) {
    if (unblockNetwork_in.isReady(clockEdge())) {
      peek(unblockNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
            trigger(Event:Last_Unblock, in_msg.addr,
                    TBEs[in_msg.addr]);
          } else {
            trigger(Event:Unblock, in_msg.addr,
                    TBEs[in_msg.addr]);
          }
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
          trigger(Event:Exclusive_Unblock, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
          trigger(Event:DMA_ACK, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

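  // Requests from the L2 banks and from DMA; DMA requests may not be
  // line-aligned, so their addresses are normalized with makeLineAddress
  // before triggering events.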
  in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
    if (requestQueue_in.isReady(clockEdge())) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO) {
          trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
          trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_DIRTY_DATA) {
          trigger(Event:Dirty_Writeback, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:WRITEBACK_CLEAN_ACK) {
          trigger(Event:Clean_Writeback, in_msg.addr,
                  TBEs[in_msg.addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
                  TBEs[makeLineAddress(in_msg.addr)]);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=0) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:WB_ACK;
        out_msg.Sender := in_msg.Requestor;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:WB_NACK;
        out_msg.Sender := in_msg.Requestor;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner);
    getDirectoryEntry(address).Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    getDirectoryEntry(address).Sharers.clear();
  }

  action(d_sendDataMsg, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;  // By definition, the block is now clean
        out_msg.Acks := in_msg.Acks;
        if (in_msg.ReadX) {
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        } else {
          out_msg.Type := CoherenceResponseType:DATA;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(p_fwdDataToDMA, "\d", desc="Forward data to the DMA requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Dirty := false;  // By definition, the block is now clean
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Sender);
    }
  }

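  // The Acks field tells the requestor how many invalidation acks to
  // expect: one per sharer, minus one if the requestor is itself a sharer.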
  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

  action(f_forwardRequestDirIsRequestor, "\f",
         desc="Forward request to owner, with the directory as requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
        out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner);
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();
        if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
          out_msg.Acks := out_msg.Acks - 1;
        }
        out_msg.MessageSize := MessageSizeType:Forwarded_Control;
      }
    }
  }

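  // Invalidations are only needed if some cache other than the requestor
  // is a sharer.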
  action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
    peek(requestQueue_in, RequestMsg) {
      if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) ||
          ((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) &&
           (getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) {
        enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceRequestType:INV;
          out_msg.Requestor := in_msg.Requestor;
          out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor);
          // out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers;
          out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers);
          out_msg.Destination.remove(in_msg.Requestor);
          out_msg.MessageSize := MessageSizeType:Invalidate_Control;
        }
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue(clockEdge());
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue(clockEdge());
  }

  action(m_addUnlockerToSharers, "m", desc="Add the unblocker to the sharer list") {
    peek(unblockNetwork_in, ResponseMsg) {
      getDirectoryEntry(address).Sharers.add(in_msg.Sender);
    }
  }

  action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1;
  }

  action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
    getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1;
    assert(getDirectoryEntry(address).WaitingUnblocks >= 0);
  }

  action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue(clockEdge());
  }

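  // This action previously used the queueMemoryRead SLICC helper; it now
  // enqueues a MemoryMsg on requestToMemory directly. The old call was
  // roughly:
  //   queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);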
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.Len := 0;
      }
    }
  }

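  // Writebacks likewise enqueue on requestToMemory now (formerly a
  // queueMemoryWrite call). If a TBE is valid, the requestor recorded
  // there (e.g. for an outstanding DMA write) is used as the sender;
  // otherwise the requestor of the incoming writeback message is used.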
  action(qw_queueMemoryWBFromCacheRequest, "qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      if (is_valid(tbe)) {
        enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
          out_msg.addr := address;
          out_msg.Type := MemoryRequestType:MEMORY_WB;
          out_msg.Sender := tbe.Requestor;
          out_msg.MessageSize := MessageSizeType:Writeback_Data;
          out_msg.DataBlk := in_msg.DataBlk;
          out_msg.Len := 0;
        }
      } else {
        enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
          out_msg.addr := address;
          out_msg.Type := MemoryRequestType:MEMORY_WB;
          out_msg.Sender := in_msg.Requestor;
          out_msg.MessageSize := MessageSizeType:Writeback_Data;
          out_msg.DataBlk := in_msg.DataBlk;
          out_msg.Len := 0;
        }
      }
    }
  }

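  // Merges the partial DMA write data held in the TBE into the block
  // returned by the owner before writing the result back to memory.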
  action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
         desc="Queue off-chip writeback request") {
    peek(unblockNetwork_in, ResponseMsg) {
      DataBlock DataBlk := in_msg.DataBlk;
      DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress),
                          tbe.Len);
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := tbe.Requestor;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := DataBlk;
        out_msg.Len := 0;
      }
    }
  }

  action(qw_queueMemoryWBFromDMARequest, "/qw", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) {
        out_msg.addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := in_msg.Requestor;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Len := 0;
      }
    }
  }

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") {
    peek(unblockNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        if (is_valid(tbe)) {
          out_msg.Destination.add(tbe.Requestor);
        }
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Acks := getDirectoryEntry(address).Sharers.count();  // for dma requests
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE entry") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.PhysicalAddress := in_msg.addr;
      tbe.Len := in_msg.Len;
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Requestor := in_msg.Requestor;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // TRANSITIONS
  transition(I, GETX, MM) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_READ, XI_M) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition(I, DMA_WRITE, XI_U) {
    qw_queueMemoryWBFromDMARequest;
    a_sendDMAAck;  // ack count may be zero
    i_popIncomingRequestQueue;
  }

  transition(XI_M, Memory_Data, I) {
    d_sendDataMsg;  // ack count may be zero
    q_popMemQueue;
  }

  transition(XI_U, Exclusive_Unblock, I) {
    cc_clearSharers;
    c_clearOwner;
    j_popIncomingUnblockQueue;
  }

  transition(S, GETX, MM) {
    qf_queueMemoryFetchRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_READ) {
    //qf_queueMemoryFetchRequest;
    p_fwdDataToDMA;
    //g_sendInvalidations;  // the DMA will collect the invalidations then send an Unblock Exclusive
    i_popIncomingRequestQueue;
  }

  transition(S, DMA_WRITE, XI_U) {
    qw_queueMemoryWBFromDMARequest;
    a_sendDMAAck;  // ack count may be zero
    g_sendInvalidations;  // the DMA will collect invalidations
    i_popIncomingRequestQueue;
  }

  transition(I, GETS, IS) {
    qf_queueMemoryFetchRequest;
    i_popIncomingRequestQueue;
  }

  transition({S, SS}, GETS, SS) {
    qf_queueMemoryFetchRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition({I, S}, PUTO) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition({I, S, O}, PUTX) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(O, GETX, MM) {
    f_forwardRequest;
    g_sendInvalidations;
    i_popIncomingRequestQueue;
  }

  transition(O, DMA_READ, OD) {
    f_forwardRequest;  // this will cause the data to go to DMA directly
    //g_sendInvalidations;  // this will cause acks to be sent to the DMA
    i_popIncomingRequestQueue;
  }

  transition(OD, DMA_ACK, O) {
    j_popIncomingUnblockQueue;
  }

  transition({O, M}, DMA_WRITE, OI_D) {
    f_forwardRequestDirIsRequestor;  // need the modified data before we can proceed
    g_sendInvalidations;             // these go to the DMA Controller
    v_allocateTBE;
    i_popIncomingRequestQueue;
  }

  transition(OI_D, Data, XI_U) {
    qw_queueMemoryWBRequestFromMessageAndTBE;
    a_sendDMAAck2;  // ack count may be zero
    w_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  transition({O, OO}, GETS, OO) {
    f_forwardRequest;
    n_incrementOutstanding;
    i_popIncomingRequestQueue;
  }

  transition(M, GETX, MM) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  // no Exclusive_Unblock will arrive at the directory in this case
  transition(M, DMA_READ, MD) {
    f_forwardRequest;  // this will cause the data to go to DMA directly
    i_popIncomingRequestQueue;
  }

  transition(MD, DMA_ACK, M) {
    j_popIncomingUnblockQueue;
  }

  transition(M, GETS, MO) {
    f_forwardRequest;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  // happens if the M -> O transition occurred on-chip
  transition(M, PUTO, MI) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTO_SHARERS, MIS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO, OS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

  transition(O, PUTO_SHARERS, OSS) {
    a_sendWriteBackAck;
    i_popIncomingRequestQueue;
  }

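  // While blocked in a transient state, new requests are recycled and
  // retried later rather than serviced immediately.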
  transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD},
             {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition({MM, MO}, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(MO, Unblock, O) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) {
    zz_recycleRequest;
  }

  transition(IS, GETS) {
    zz_recycleRequest;
  }

  transition(IS, Unblock, S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(IS, Exclusive_Unblock, M) {
    cc_clearSharers;
    e_ownerIsUnblocker;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(SS, Last_Unblock, S) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Unblock) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(OO, Last_Unblock, O) {
    m_addUnlockerToSharers;
    o_decrementOutstanding;
    j_popIncomingUnblockQueue;
  }

  transition(MI, Dirty_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(MIS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(MIS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    i_popIncomingRequestQueue;
  }

  transition(OS, Dirty_Writeback, S) {
    c_clearOwner;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(OSS, Dirty_Writeback, S) {
    c_moveOwnerToSharer;
    qw_queueMemoryWBFromCacheRequest;
    i_popIncomingRequestQueue;
  }

  transition(OSS, Clean_Writeback, S) {
    c_moveOwnerToSharer;
    i_popIncomingRequestQueue;
  }

  transition(MI, Clean_Writeback, I) {
    c_clearOwner;
    cc_clearSharers;
    i_popIncomingRequestQueue;
  }

  transition(OS, Clean_Writeback, S) {
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition({MI, MIS}, Unblock, M) {
    j_popIncomingUnblockQueue;
  }

  transition({OS, OSS}, Unblock, O) {
    j_popIncomingUnblockQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
    d_sendDataMsg;
    q_popMemQueue;
  }

  transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) {
    //a_sendAck;
    q_popMemQueue;
  }

}