machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
  MemoryControl * memBuffer,
  int directory_latency = 12
{

  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true";

  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
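  // Each message class above travels on its own virtual network so that
  // replies are never queued behind new requests (the usual protocol-
  // deadlock precaution). The request and DMA networks are ordered;
  // forward and response traffic is not.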
  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    M, desc="Modified";

    M_DRD, desc="Blocked on an invalidation for a DMA read";
    M_DWR, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, desc="Intermediate state M_DWR-->I";
    M_DRDI, desc="Intermediate state M_DRD-->I";

    IM, desc="Intermediate state I-->M";
    MI, desc="Intermediate state M-->I";
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";
  }
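  // Of the states above, only I and M are stable: in this MI protocol a
  // block is either unowned (memory holds the data) or owned exclusively
  // by one cache. Everything else is a transient state held while a
  // memory access or an invalidation is outstanding.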
  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTX_NotOwner, desc="A PUTX arrives from a requestor that is not the owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }
  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
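  // The TBETable above holds one Transaction Buffer Entry per in-flight
  // transaction, keyed by line address. Here TBEs are allocated for DMA
  // transfers and for PUTX writebacks that must wait for a memory ack.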
  // ** OBJECTS **
  TBETable TBEs, template_hack="<Directory_TBE>";

  void set_tbe(TBE b);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }
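  // Note that a valid TBE takes precedence over the directory entry when
  // resolving a block's state, so per-transaction transient state shadows
  // the stable state for as long as the TBE is allocated.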
  void setState(TBE tbe, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
        directory.invalidateBlock(addr);
      }
    }
  }
  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  // Note: declared with ResponseMsg even though requestToDir carries
  // RequestMsg; no action below ever references this port (recycling is
  // done via requestQueue_in.recycle()), so the mismatch appears to be
  // harmless leftover code.
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //added by SS
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **
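  // Each in_port below inspects the message at the head of its buffer and
  // maps it to a protocol Event via trigger(); SLICC then looks up the
  // (state, event) pair in the transition table at the bottom of this
  // file and runs the listed actions atomically.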
  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        TBE tbe := TBEs[in_msg.LineAddress];
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress, tbe);
        } else {
          error("Invalid message");
        }
      }
    }
  }
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address, tbe);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address, tbe);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address, tbe);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }
  //added by SS
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs[in_msg.Address];
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, tbe);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, tbe);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }
  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(tbe.DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
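  // dr_ and drp_ above differ only in where the line's data comes from:
  // dr_ returns data fetched from memory (block was in I), while drp_
  // returns the dirty data carried by the owner's PUTX (block was in M).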
  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      assert(is_valid(tbe));
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(tbe.DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Address).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.Address).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      //getDirectoryEntry(in_msg.Address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }
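  // copyPartial above overwrites only Len bytes of the block, starting at
  // the byte offset of the DMA's physical address, so a sub-block DMA
  // write does not clobber the rest of the line.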
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.PhysicalAddress;
      tbe.Len := in_msg.Len;
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DmaRequestor := in_msg.Requestor;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }
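  // recycle() re-enqueues the head message to be retried after a short
  // delay instead of consuming it; the transitions below use it to stall
  // conflicting requests while a block is in a transient state.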
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        // get incoming data
        // out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }
  action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
    //getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk,
                                                   addressOffset(tbe.PhysicalAddress),
                                                   tbe.Len);
  }
  // TRANSITIONS

  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
    y_recycleDMARequestQueue;
  }
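  // In any of the transient states above the directory is waiting on
  // memory or on an invalidation, so competing CPU and DMA requests are
  // recycled until the block settles back into I or M.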
  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }
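  // The two transitions above split a GETX miss into two steps: the fetch
  // is queued to memory and the requestor is recorded as owner right away
  // (I-->IM); the data response goes out only when memory replies (IM-->M).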
  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }
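  // DMA to an idle block never involves a cache: the four transitions
  // above satisfy reads straight from memory (I-->ID-->I) and send writes
  // straight to memory as a partial writeback (I-->ID_W-->I), with the TBE
  // carrying the requestor and, for writes, the data and length until
  // memory responds.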
  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, M_DRDI) {
    l_writeDataToMemory;
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }
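  // When a cache owns the block, a DMA access first invalidates the
  // owner; the owner's PUTX then supplies the dirty data, which the
  // transitions above forward to the DMA controller and write back to
  // memory. The DMA write case below is symmetric, except the DMA data
  // saved in the TBE is merged into the written-back block.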
  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    l_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }
  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }
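  // In the transition above, ownership transfers cache-to-cache: the
  // directory just forwards the GETX to the current owner (who sends the
  // data directly) and records the requestor as the new owner, so the
  // block stays in M throughout.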
  transition(M, PUTX, MI) {
    l_writeDataToMemory;
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
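  // In the two PUTX_NotOwner cases above, a stale PUTX (the sender has
  // already lost ownership, e.g. to a forwarded GETX) is simply nack'd
  // and the directory state is unchanged; the WB_NACK presumably tells
  // the old owner its writeback is obsolete.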
}