ruby: fixed dma sequencer bug
[gem5.git] / src / mem / protocol / MI_example-dir.sm
1
// Directory controller for the MI example protocol.  Tracks a single owner
// per block (no sharers in practice), services CPU GETS/GETX/PUTX requests
// and DMA reads/writes, and talks to the off-chip memory controller.
machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_DIRECTORY_LATENCY LATENCY_MEMORY_LATENCY {

  // Outgoing virtual networks: forwarded coherence requests, data/ack
  // responses to caches, and responses back to the DMA controller each use
  // a separate virtual network to avoid protocol deadlock.
  MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer dmaRequestFromDir, network="To", virtual_network="4", ordered="true";

  // Incoming virtual networks: CPU cache requests (vnet 0) and DMA
  // requests (vnet 5), both point-to-point ordered.
  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="true";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="5", ordered="true";
11 // STATES
  // STATES
  // Stable states are I and M; all others are transient states used while a
  // memory access or cache invalidation is outstanding.
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    M, desc="Modified";

    // Waiting for the owner's PUTX after sending an INV on behalf of a DMA.
    M_DRD, desc="Blocked on an invalidation for a DMA read";
    M_DWR, desc="Blocked on an invalidation for a DMA write";

    // PUTX received for a DMA write; waiting for the memory writeback ack.
    M_DWRI, desc="Intermediate state M_DWR-->I";

    // Waiting on the memory controller for a CPU fetch / CPU writeback.
    IM, desc="Intermediate state I-->M";
    MI, desc="Intermediate state M-->I";
    // Waiting on the memory controller for a DMA read / DMA write.
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";
  }
27
28 // Events
  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    // PUTX whose sender is not the recorded owner (stale writeback); nacked.
    PUTX_NotOwner, desc="A PUTX arrives";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller completions for previously queued fetch/writeback.
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }
44
45 // TYPES
46
47 // DirectoryEntry
48 structure(Entry, desc="...") {
49 State DirectoryState, desc="Directory state";
50 DataBlock DataBlk, desc="data for the block";
51 NetDest Sharers, desc="Sharers for this block";
52 NetDest Owner, desc="Owner of this block";
53 }
54
55 external_type(DirectoryMemory) {
56 Entry lookup(Address);
57 bool isPresent(Address);
58 void invalidateBlock(Address);
59 }
60
61 external_type(MemoryControl, inport="yes", outport="yes") {
62
63 }
64
65
66 // TBE entries for DMA requests
67 structure(TBE, desc="TBE entries for outstanding DMA requests") {
68 State TBEState, desc="Transient State";
69 DataBlock DataBlk, desc="Data to be written (DMA write only)";
70 int Offset, desc="...";
71 int Len, desc="...";
72 }
73
74 external_type(TBETable) {
75 TBE lookup(Address);
76 void allocate(Address);
77 void deallocate(Address);
78 bool isPresent(Address);
79 }
80
  // ** OBJECTS **
  // Backing directory storage, looked up by the configured name.
  DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';

  // Off-chip memory controller queue for fetches and writebacks.
  MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';

  // Outstanding (transient) transactions, primarily DMA operations.
  TBEs TBEs, template_hack="<Directory_TBE>";
87
  // Return the controller state for addr.  A transient TBE state takes
  // precedence over the stable directory state; blocks the directory does
  // not track are Invalid.
  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (directory.isPresent(addr)) {
      return directory[addr].DirectoryState;
    } else {
      return State:I;
    }
  }
97
  // Record the new state in the TBE (if one is outstanding) and in the
  // directory entry (if present), sanity-checking the stable states.
  void setState(Address addr, State state) {

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (directory.isPresent(addr)) {

      // Invariants: an Invalid block has no owner or sharers; a Modified
      // block has exactly one owner and no sharers.
      if (state == State:I)  {
        assert(directory[addr].Owner.count() == 0);
        assert(directory[addr].Sharers.count() == 0);
      } else if (state == State:M) {
        assert(directory[addr].Owner.count() == 1);
        assert(directory[addr].Sharers.count() == 0);
      }

      directory[addr].DirectoryState := state;
    }
  }
117
  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // NOTE(review): declared with ResponseMsg although requestToDir's in_port
  // peeks RequestMsg — port appears unused except for recycling; confirm.
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaRequestFromDir);

  //added by SS
  // Queue to the off-chip memory controller.
  out_port(memQueue_out, MemoryMsg, memBuffer);
126 // ** IN_PORTS **
127
  // ** IN_PORTS **

  // DMA requests: trigger on the line address so DMA and CPU requests to
  // the same block serialize through the same state machine entry.
  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress);
        } else {
          error("Invalid message");
        }
      }
    }
  }
141
  // CPU cache requests.  A PUTX is classified by whether the sender is the
  // recorded owner; stale writebacks become PUTX_NotOwner and get nacked.
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          // NOTE(review): unguarded directory lookup — assumes the entry is
          // present for any PUTX address; confirm DirectoryMemory allocates
          // on lookup or that PUTX can only follow an earlier GETX.
          if (directory[in_msg.Address].Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
161
  //added by SS
  // off-chip memory request/response is done
  // Completions from the memory controller: READ completions deliver data,
  // WB completions acknowledge a writeback.
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DEBUG_EXPR(in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
178
179 // Actions
180
  // Ack a writeback directly from the incoming PUTX (no memory round trip).
  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Ack a writeback after the memory controller confirms it; the original
  // requestor is recovered from the memory completion message.
  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Reject a PUTX from a cache that is not the recorded owner.
  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
216
  // Drop the recorded owner (used when the owner writes the block back).
  action(c_clearOwner, "c", desc="Clear the owner field") {
    directory[address].Owner.clear();
  }
220
221 // action(d_sendData, "d", desc="Send data to requestor") {
222 // peek(requestQueue_in, RequestMsg) {
223 // enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
224 // out_msg.Address := address;
225 //
226 // if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
227 // // out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_CLEAN;
228 // out_msg.Type := CoherenceResponseType:DATA;
229 // } else {
230 // out_msg.Type := CoherenceResponseType:DATA;
231 // }
232 //
233 // out_msg.Sender := machineID;
234 // out_msg.Destination.add(in_msg.Requestor);
235 // out_msg.DataBlk := directory[in_msg.Address].DataBlk;
236 // out_msg.MessageSize := MessageSizeType:Response_Data;
237 // }
238 // }
239 // }
240
  // Forward data fetched from memory to the cache that requested it.
  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
253
254 // action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
255 // peek(dmaRequestQueue_in, DMARequestMsg) {
256 // enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
257 // out_msg.PhysicalAddress := address;
258 // out_msg.Type := DMAResponseType:DATA;
259 // out_msg.DataBlk := directory[in_msg.PhysicalAddress].DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
260 // out_msg.Destination.add(map_Address_to_DMA(address));
261 // out_msg.MessageSize := MessageSizeType:Response_Data;
262 // }
263 // }
264 // }
265
  // Forward data fetched from memory to the DMA controller.
  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }



  // Satisfy a DMA read with the fresh data carried by the owner's PUTX,
  // avoiding a memory round trip.
  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // Tell the DMA controller its write has completed.
  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
303
  // Release the directory entry once the block returns to Invalid.
  action(d_deallocateDirectory, "\d", desc="Deallocate Directory Entry") {
    directory.invalidateBlock(address);
  }

  // Make the incoming requestor the sole owner of the block.
  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      directory[address].Owner.clear();
      directory[address].Owner.add(in_msg.Requestor);
    }
  }
314
  // Forward a CPU request to the current owner, which will supply the data
  // directly (directory stays out of the data path).
  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := directory[in_msg.Address].Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  // Invalidate the owner's copy on behalf of a DMA access; the owner will
  // respond with a PUTX carrying the up-to-date data.
  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
342
  // Consume the head of the CPU request queue.
  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  // Consume the head of the DMA request queue.
  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }

  // Copy the data carried by a PUTX into the directory's backing block.
  action(l_writeDataToMemory, "l", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(in_msg.DataBlk);
    }
  }
360
  // Merge the buffered partial-line DMA write data into the directory block.
  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
  }

  // Buffer an incoming DMA write (data, offset, length) until the
  // invalidation / memory writeback completes.
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
      TBEs[address].Offset := in_msg.Offset;
      TBEs[address].Len := in_msg.Len;
    }
  }

  // Buffer the data of an incoming PUTX until memory acknowledges it.
  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  // Requeue the head request so other addresses can make progress while
  // this block is in a transient state.
  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }
388
389
  // Queue an off-chip read on behalf of a CPU request; the requestor is
  // recorded so the completion can be routed back (see d_sendData).
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  // Queue an off-chip read on behalf of a DMA request.
  // NOTE(review): OriginalRequestorMachId is deliberately left unset (see
  // commented line); the completion is matched by address and forwarded via
  // map_Address_to_DMA in dr_sendDMAData — confirm this is intended.
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := directory[address].DataBlk;
        DEBUG_EXPR(out_msg);
      }
    }
  }
417 // action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
418 // peek(dmaRequestQueue_in, DMARequestMsg) {
419 // enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
420 // out_msg.Address := address;
421 // out_msg.Type := MemoryRequestType:MEMORY_WB;
422 // out_msg.OriginalRequestorMachId := machineID;
423 // out_msg.DataBlk := in_msg.DataBlk;
424 // out_msg.MessageSize := in_msg.MessageSize;
425
426 // DEBUG_EXPR(out_msg);
427 // }
428 // }
429 // }
430
431
  // Queue an off-chip writeback for a partial-line DMA write taken straight
  // from the DMA request message (block was Invalid, no owner to flush).
  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        // Only the addressed Offset/Len bytes of the block are updated.
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DEBUG_EXPR(out_msg);
      }
    }
  }

  // Queue an off-chip writeback for a DMA write that was buffered in the
  // TBE while the owner's copy was being invalidated (triggered by the
  // owner's PUTX).
  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DEBUG_EXPR(out_msg);
      }
    }
  }



  // Queue an off-chip writeback of a full block carried by a PUTX.
  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DEBUG_EXPR(out_msg);
      }
    }
  }

  // Consume the head of the memory-controller completion queue.
  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  // Copy the TBE's buffered data into the directory's backing block.
  action(w_writeDataToMemoryFromTBE, "\w", desc="Write date to directory memory from TBE") {
    directory[address].DataBlk := TBEs[address].DataBlk;
  }
488
  // TRANSITIONS

  // Stall CPU exclusive requests while a DMA invalidation is outstanding.
  transition({M_DRD, M_DWR}, GETX) {
    z_recycleRequestQueue;
  }

  // Stall everything while a memory fetch/writeback is in flight for this
  // block; the requests are recycled, not dropped.
  transition({IM, MI, ID, ID_W}, {GETX, GETS, DMA_READ, DMA_WRITE, PUTX, PUTX_NotOwner} ) {
    z_recycleRequestQueue;
  }

  // CPU fetch of an Invalid block: record the new owner immediately and
  // wait for memory to return the data.
  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }


  // DMA read of an Invalid block: fetch from memory, reply when data returns.
  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    l_popMemQueue;
  }



  // DMA write of an Invalid block: buffer the data in a TBE, write the
  // partial line to memory, and ack the DMA controller when memory acks.
  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }
539
  // DMA read of a Modified block: invalidate the owner and wait for its
  // PUTX, which carries the current data.
  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  // Owner's PUTX arrives: forward its data to the DMA controller and
  // retire the block to Invalid.
  transition(M_DRD, PUTX, I) {
    drp_sendDMAData;
    c_clearOwner;
    a_sendWriteBackAck;
    d_deallocateDirectory;
    i_popIncomingRequestQueue;
  }

  // DMA write of a Modified block: buffer the DMA data, then invalidate
  // the owner to flush its copy.
  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  // Owner's PUTX arrives: merge the buffered DMA bytes over it and write
  // the result back to memory.
  transition(M_DWR, PUTX, M_DWRI) {
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  // Memory confirms the writeback: ack both the evicted owner and the DMA
  // controller, then retire the block.
  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }
573
  // Ownership transfer: forward the GETX to the current owner (which
  // supplies the data cache-to-cache) and record the new owner.
  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  // Owner writes the block back: buffer the data, queue the memory
  // writeback, and ack once memory confirms (MI, Memory_Ack below).
  transition(M, PUTX, MI) {
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }

  // Stale writeback from a non-owner (it lost ownership in flight): nack.
  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
604
605 }