[gem5.git] / src/mem/protocol/MI_example-dir.sm
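
// MI_example directory controller (SLICC). The directory is the ordering
// point of a simple two-state (M/I) protocol: memory owns every idle block,
// a GETX hands exclusive ownership to the requesting cache, and DMA reads
// and writes are serialized against any cached copy.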

machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
  MemoryControl * memBuffer,
  int directory_latency = 12
{

  MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="4", ordered="false";
  MessageBuffer dmaResponseFromDir, network="To", virtual_network="1", ordered="true";

  MessageBuffer requestToDir, network="From", virtual_network="2", ordered="true";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";

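  // Virtual-network usage: forwards and invalidations leave on vnet 3, data
  // and ack responses on vnet 4, DMA responses on vnet 1; requests arrive
  // from processors on vnet 2 and from the DMA controller on vnet 0, both
  // over ordered networks.
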
  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    M, desc="Modified";

    M_DRD, desc="Blocked on an invalidation for a DMA read";
    M_DWR, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, desc="Intermediate state M_DWR-->I";
    M_DRDI, desc="Intermediate state M_DRD-->I";

    IM, desc="Intermediate state I-->M";
    MI, desc="Intermediate state M-->I";
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";
  }
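
  // Only I and M are stable: a block is either backed solely by memory or
  // owned exclusively by one cache. Every other state is transient, waiting
  // on a memory response or on a PUTX from the current owner.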

  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTX_NotOwner, desc="A PUTX arrives from a non-owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
    MachineID DmaRequestor, desc="DMA requestor";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  TBETable TBEs, template_hack="<Directory_TBE>";

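  // One TBE tracks each in-flight DMA transaction (and each PUTX writeback),
  // keyed by line address; while it exists, its TBEState overrides the
  // stable directory state in getState() below.
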
  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

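  // setState enforces the protocol invariants: exactly one owner and no
  // sharers in M, neither in I; entering I also releases the directory
  // entry via invalidateBlock().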
  void setState(Address addr, State state) {

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
        directory.invalidateBlock(addr);
      }
    }
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, RequestMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);

  //added by SS
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress);
        } else {
          error("Invalid message");
        }
      }
    }
  }

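  // Incoming processor requests. A PUTX is classified against the recorded
  // owner: a writeback from a cache that has already lost ownership (for
  // example to a racing GETX) becomes PUTX_NotOwner and is nacked.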
  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
          if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  //added by SS
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

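  // Two writeback-ack flavors: "a" acks straight off the incoming PUTX,
  // while "la" waits for the memory controller's ack and recovers the
  // requestor from the MemoryMsg's OriginalRequestorMachId field.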
  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(TBEs[address].DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(TBEs[address].DmaRequestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(TBEs[address].DmaRequestor);
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Address).Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := getDirectoryEntry(in_msg.Address).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(l_writeDataToMemory, "pl", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      //getDirectoryEntry(in_msg.Address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk,
                                                   addressOffset(TBEs[address].PhysicalAddress),
                                                   TBEs[address].Len);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
      TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
      TBEs[address].Len := in_msg.Len;
      TBEs[address].DmaRequestor := in_msg.Requestor;
    }
  }

  action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DmaRequestor := in_msg.Requestor;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
    dmaRequestQueue_in.recycle();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        // get incoming data
        // out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
    //getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk,
                                                   addressOffset(TBEs[address].PhysicalAddress),
                                                   TBEs[address].Len);
  }

  // TRANSITIONS

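  // In transient states the directory cannot service a new request for the
  // block, so the message is recycled (re-enqueued to be retried later)
  // rather than stalling the whole queue.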
  transition({M_DRD, M_DWR, M_DWRI, M_DRDI}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {DMA_READ, DMA_WRITE}) {
    y_recycleDMARequestQueue;
  }

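  // Demand path from I: fetch the line from memory, record the requestor as
  // owner right away, and deliver the data when Memory_Data returns.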
  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }

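  // DMA read of an uncached block: remember the DMA requestor in a TBE,
  // fetch from memory, and forward the block on Memory_Data.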
  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    r_allocateTbeForDmaRead;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    w_deallocateTBE;
    l_popMemQueue;
  }

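  // DMA write of an uncached block: the partial data is merged into the
  // memory writeback (copyPartial), and the directory's copy is patched
  // from the TBE once memory acks.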
  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

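  // DMA read of a cached block: invalidate the owner, satisfy the DMA from
  // the PUTX data, write that data back to memory, and ack the owner's
  // writeback once memory has committed it.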
  transition(M, DMA_READ, M_DRD) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, M_DRDI) {
    l_writeDataToMemory;
    drp_sendDMAData;
    c_clearOwner;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

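  // DMA write of a cached block: invalidate the owner, overlay the DMA data
  // from the TBE onto the written-back line, then notify both the former
  // owner (WB_ACK) and the DMA controller on the memory ack.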
  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    l_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

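  // Demand traffic in M: a GETX is forwarded to the current owner for a
  // cache-to-cache transfer and ownership moves to the requestor; a PUTX
  // from the owner retires the block to memory through MI.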
  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    l_writeDataToMemory;
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }
}