// src/mem/protocol/MI_example-dir.sm
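//
// MI_example directory controller: a two-state (I/M) directory protocol
// with support for DMA reads and writes. The transient states track
// in-flight memory fetches, writebacks, and DMA-triggered invalidations.
//
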
machine(Directory, "Directory protocol")
: int directory_latency
{

  MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer dmaRequestFromDir, network="To", virtual_network="4", ordered="true";

  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="true";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="5", ordered="true";

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    M, desc="Modified";

    M_DRD, desc="Blocked on an invalidation for a DMA read";
    M_DWR, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, desc="Intermediate state M_DWR-->I";
    M_DRDI, desc="Intermediate state M_DRD-->I";

    IM, desc="Intermediate state I-->M";
    MI, desc="Intermediate state M-->I";
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTX_NotOwner, desc="A PUTX arrives from a non-owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  external_type(DirectoryMemory) {
    Entry lookup(Address);
    bool isPresent(Address);
    void invalidateBlock(Address);
  }

  external_type(MemoryControl, inport="yes", outport="yes") {

  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory"])';

  MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_control"])';

  TBETable TBEs, template_hack="<Directory_TBE>";

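  // A pending TBE carries the transient state for an in-flight transaction,
  // so it takes precedence over the stable directory state.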
  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (directory.isPresent(addr)) {
      return directory[addr].DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(Address addr, State state) {

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (directory.isPresent(addr)) {

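      // Sanity check: a stable state must be consistent with the
      // owner/sharer counts recorded in the directory entry.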
      if (state == State:I) {
        assert(directory[addr].Owner.count() == 0);
        assert(directory[addr].Sharers.count() == 0);
      } else if (state == State:M) {
        assert(directory[addr].Owner.count() == 1);
        assert(directory[addr].Sharers.count() == 0);
      }

      directory[addr].DirectoryState := state;
    }
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaRequestFromDir);

  //added by SS
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
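          // Only the registered owner may write back; a PUTX from any
          // other requestor is NACKed via PUTX_NotOwner.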
          if (directory[in_msg.Address].Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  //added by SS
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DEBUG_EXPR(in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    directory[address].Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(TBEs[address].DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(TBEs[address].DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(TBEs[address].DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(d_deallocateDirectory, "\d", desc="Deallocate Directory Entry") {
    directory.invalidateBlock(address);
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      directory[address].Owner.clear();
      directory[address].Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := directory[in_msg.Address].Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      //directory[in_msg.Address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
  }

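  // TBE allocation copies the fields of the in-flight request so the
  // incoming queue can be popped before the transaction completes.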
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
      TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
      TBEs[address].Len := in_msg.Len;
      TBEs[address].DmaRequestor := in_msg.Requestor;
    }
  }

  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DmaRequestor := in_msg.Requestor;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := directory[address].DataBlk;
        DEBUG_EXPR(out_msg);
      }
    }
  }

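  // DMA writes may touch only part of a line: copyPartial merges just the
  // Len bytes starting at the requested offset into the outgoing block.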
  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        // get incoming data
        // out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
    //directory[address].DataBlk := TBEs[address].DataBlk;
    directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
  }

  // TRANSITIONS

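  // Requests that arrive while a transaction is in flight are recycled
  // (retried later) rather than serviced out of order.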
  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
    y_recycleDMARequestQueue;
  }

  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }

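  // DMA to an idle block is serviced directly from memory.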
  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

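  // DMA to a Modified block: invalidate the owner first, then service the
  // DMA with the data that comes back in the owner's PUTX.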
  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, M_DRDI) {
    l_writeDataToMemory;
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    l_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }

  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    l_writeDataToMemory;
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
}