ef72632849bbab1eb762dc04fbb2f1e940a90d16
[gem5.git] / src / mem / protocol / MI_example-dir.sm
1 /*
2 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
3 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
// MI_example directory controller: a simple blocking directory that tracks
// at most one owner cache per block and services DMA reads and writes,
// stalling conflicting requests by recycling them.
machine(Directory, "Directory protocol")
 : DirectoryMemory * directory;
   Cycles directory_latency := 12;           // latency for directory-issued forwards/acks/nacks
   Cycles to_memory_controller_latency := 1; // latency to enqueue requests to memory

   // Outgoing virtual networks
   MessageBuffer * forwardFromDir, network="To", virtual_network="3",
        vnet_type="forward";
   MessageBuffer * responseFromDir, network="To", virtual_network="4",
        vnet_type="response";
   MessageBuffer * dmaResponseFromDir, network="To", virtual_network="1",
        vnet_type="response";

   // Incoming virtual networks
   MessageBuffer * requestToDir, network="From", virtual_network="2",
        vnet_type="request";
   MessageBuffer * dmaRequestToDir, network="From", virtual_network="0",
        vnet_type="request";
{
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states.
    // Permissions are from the directory's point of view: in I the memory
    // copy is the valid one (Read_Write); in M the owner cache holds the
    // only valid copy, so the directory's data is Invalid.
    I, AccessPermission:Read_Write, desc="Invalid";
    M, AccessPermission:Invalid, desc="Modified";

    // Blocked while invalidating the current owner on behalf of a DMA access
    M_DRD, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA read";
    M_DWR, AccessPermission:Busy, desc="Blocked on an invalidation for a DMA write";

    // Waiting for the memory ack that completes the DMA-triggered writeback
    M_DWRI, AccessPermission:Busy, desc="Intermediate state M_DWR-->I";
    M_DRDI, AccessPermission:Busy, desc="Intermediate state M_DRD-->I";

    // Waiting on memory for a demand fetch, a writeback, or a DMA access
    // that started in state I
    IM, AccessPermission:Busy, desc="Intermediate state I-->M";
    MI, AccessPermission:Busy, desc="Intermediate state M-->I";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
  }
64
  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    // PUTX whose sender is not the recorded owner (stale writeback);
    // split out so it can be nacked (see b_sendWriteBackNack).
    PUTX_NotOwner, desc="A PUTX arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }
81
  // TYPES

  // DirectoryEntry: per-block state held in DirectoryMemory.
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    NetDest Sharers, desc="Sharers for this block";
    // NOTE: Owner is a NetDest set, but setState asserts it holds exactly
    // one element in M and zero in I.
    NetDest Owner, desc="Owner of this block";
  }
90
  // TBE entries for DMA requests: per-address transient bookkeeping that
  // survives across the trip to memory / the owner cache.
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }
99
  // Externally-implemented table mapping addresses to TBEs.
  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
106
  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  // Bind/unbind the implicit 'tbe' variable used inside actions.
  void set_tbe(TBE b);
  void unset_tbe();
112
  // Return the directory entry for addr, allocating a fresh Entry on first
  // touch (so callers always get a valid pointer).
  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry :=  static_cast(Entry, "pointer",
                              directory.allocate(addr, new Entry));
    return dir_entry;
  }
124
  // Current state of addr: a valid TBE's transient state takes precedence
  // over the stable directory state; untracked addresses default to I.
  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }
134
  // Update both the TBE's transient state (if one exists) and the stable
  // directory state, with sanity checks on the Owner/Sharers sets.
  void setState(TBE tbe, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      // M must have exactly one owner and no sharers...
      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      // ...and I must have neither (checked after the write so the entry
      // reflects the new state).
      if (state == State:I)  {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }
    }
  }
156
  // Access permission for functional accesses; like getState, an in-flight
  // TBE overrides the stable directory state.
  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    if(directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    return AccessPermission:NotPresent;
  }
169
  // Propagate a state change into the entry's access permission.
  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }
175
  // Functional (debug/simulation) read: serve from an in-flight TBE's data
  // if present, otherwise fall through to backing memory.
  void functionalRead(Address addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }
184
  // Functional write: update both the in-flight TBE copy (if any) and
  // backing memory; returns the number of locations written.
  int functionalWrite(Address addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }
197
  // Buffer carrying replies back from the memory controller (drained by
  // memQueue_in below).
  MessageBuffer responseFromMemory;

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
205
  // ** IN_PORTS **

  // DMA requests: classify each message into a DMA_READ or DMA_WRITE event,
  // triggered on the line address with any existing TBE attached.
  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }
221
  // Demand requests from caches. PUTX is split into PUTX vs PUTX_NotOwner
  // by checking the sender against the recorded owner, so stale writebacks
  // can be nacked instead of corrupting memory.
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        TBE tbe := TBEs[in_msg.Addr];
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Addr, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Addr, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.Addr).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Addr, tbe);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Addr, tbe);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
242
  //added by SS
  // off-chip memory request/response is done: a read completion becomes
  // Memory_Data, a writeback completion becomes Memory_Ack.
  in_port(memQueue_in, MemoryMsg, responseFromMemory) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs[in_msg.Addr];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Addr, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Addr, tbe);
        } else {
          DPRINTF(RubySlicc,"%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
260
  // Actions

  // Ack a writeback directly from the request (no memory round trip);
  // contrast with l_sendWriteBackAck, which acks after the memory reply.
  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
274
  // Ack a writeback once memory has confirmed it; the original requestor
  // is recovered from the memory reply's OriginalRequestorMachId.
  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
286
  // Reject a PUTX from a machine that is not the recorded owner.
  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
298
  // Drop the current owner (used on writebacks and DMA invalidations).
  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }
302
  // Forward memory data to the cache whose fetch triggered it.
  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
315
  // Satisfy a DMA read with data that just arrived from memory; the DMA
  // requestor was stashed in the TBE when the request was accepted.
  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
329
330
331
  // Satisfy a DMA read using the data carried by the owner's PUTX (the
  // owner had the only up-to-date copy, so memory could not be used).
  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;

        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
348
  // Complete a DMA write by acking the DMA controller recorded in the TBE.
  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, 1) {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
359
  // Record the requestor as the (sole) owner, replacing any previous one.
  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }
366
  // Forward a demand request to the current owner (cache-to-cache supply);
  // the transition comments record owner/requestor for trace debugging.
  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Addr).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.Addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.Addr).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
382
  // Invalidate the owning cache on behalf of a DMA access; the owner will
  // respond with a PUTX carrying the dirty data.
  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, directory_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
394
  // Consume the head of the demand-request queue.
  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  // Consume the head of the DMA-request queue.
  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }
402
  // Allocate a TBE for a DMA request, capturing its data/address/length/
  // requestor from the incoming message (data+len matter for writes).
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }
413
  // Lighter-weight TBE for a DMA read: only the requestor is needed, since
  // the data will come from memory.
  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }
421
  // Allocate a TBE from a demand request (PUTX writeback path), keeping a
  // copy of the written-back data for functional accesses while in MI.
  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
    }
  }
429
  // Release the TBE and clear the implicit 'tbe' binding.
  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // Stall a demand request by re-enqueuing it at the tail.
  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  // Stall a DMA request by re-enqueuing it at the tail.
  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }
442
443
  // Issue a memory read on behalf of a demand (cache) requestor.
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }

  // Issue a memory read on behalf of a DMA requestor.
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
    }
  }
455
  // Partial (sub-line) memory write using the data/length straight from the
  // incoming DMA message (line was in I, no owner to merge with).
  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      queueMemoryWritePartial(in_msg.Requestor, address,
                              to_memory_controller_latency, in_msg.DataBlk,
                              in_msg.Len);
    }
  }

  // Partial memory write using the DMA data buffered in the TBE; peeked on
  // the request queue because it runs when the owner's PUTX arrives (the
  // PUTX supplies the requestor, the TBE supplies the DMA bytes to merge).
  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryWritePartial(in_msg.Requestor, address,
                              to_memory_controller_latency, tbe.DataBlk,
                              tbe.Len);
    }
  }
471
  // Write a full line of PUTX data back to memory.
  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
                       in_msg.DataBlk);
    }
  }

  // Consume the head of the memory-response queue.
  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }
482
  // TRANSITIONS

  // A DMA-triggered invalidation is in flight: stall demand GETX requests
  // by recycling them until the line settles.
  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  // Waiting on memory: stall all demand requests.
  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner} ) {
    z_recycleRequestQueue;
  }

  // Waiting on memory: stall DMA requests as well.
  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE} ) {
    y_recycleDMARequestQueue;
  }

  // Demand GETX on an idle line: fetch from memory and record the new owner
  // immediately so subsequent requests see it.
  transition(I, GETX, IM) {
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  // Memory data arrives for the pending GETX: forward it to the requestor.
  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
  }

  // DMA read of an idle line: memory has the valid copy, fetch it.
  transition(I, DMA_READ, ID) {
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // DMA write of an idle line: partial write goes straight to memory.
  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // DMA read while a cache owns the line: invalidate the owner and wait for
  // its PUTX to supply the up-to-date data.
  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  // Owner's writeback arrives: satisfy the DMA read from the PUTX data and
  // also write that data back to memory.
  transition(M_DRD, PUTX, M_DRDI) {
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // DMA write while a cache owns the line: invalidate the owner first.
  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  // Owner's writeback arrives: merge the buffered DMA data into memory via
  // a partial write (TBE holds the DMA bytes and length).
  transition(M_DWR, PUTX, M_DWRI) {
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  // Memory confirms the merge: ack both the writing-back cache and the DMA.
  transition(M_DWRI, Memory_Ack, I) {
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // GETX while owned: forward to the current owner (cache-to-cache supply)
  // and hand ownership to the requestor; directory stays in M.
  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  // Owner writes back: send the data to memory and ack once it replies.
  transition(M, PUTX, MI) {
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  // Stale PUTX from a non-owner: nack it, state unchanged.
  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
}