gem5.git: src/mem/protocol/MI_example-dir.sm

machine(Directory, "Directory protocol")
: int directory_latency,
  int dma_select_low_bit,
  int dma_select_num_bits
{

  MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer dmaRequestFromDir, network="To", virtual_network="4", ordered="true";

  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="true";
  MessageBuffer dmaRequestToDir, network="From", virtual_network="5", ordered="true";
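
  // The directory drives three outbound virtual networks (forward, response,
  // and DMA response) and listens on two inbound ones.  Both inbound
  // networks and the DMA response network are ordered; the forward and
  // response networks are not.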

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid";
    M, desc="Modified";

    M_DRD, desc="Blocked on an invalidation for a DMA read";
    M_DWR, desc="Blocked on an invalidation for a DMA write";

    M_DWRI, desc="Intermediate state M_DWR-->I";

    IM, desc="Intermediate state I-->M";
    MI, desc="Intermediate state M-->I";
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";
  }
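
  // Transient states follow a from-to naming convention: IM is in flight
  // from I to M, MI from M to I, and so on.  ID and ID_W cover DMA reads
  // and writes that hit an Invalid line.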

  // Events
  enumeration(Event, desc="Directory events") {
    // processor requests
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    PUTX, desc="A PUTX arrives";
    PUTX_NotOwner, desc="A PUTX arrives from a node that is not the current owner";

    // DMA requests
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";

    // Memory Controller
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  external_type(DirectoryMemory) {
    Entry lookup(Address);
    bool isPresent(Address);
    void invalidateBlock(Address);
  }

  external_type(MemoryControl, inport="yes", outport="yes") {

  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';

  MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';

  TBETable TBEs, template_hack="<Directory_TBE>";
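
  // While a transaction is outstanding the authoritative state lives in the
  // TBE; otherwise it comes from the directory entry.  Blocks with no entry
  // at all are Invalid.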

  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (directory.isPresent(addr)) {
      return directory[addr].DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(Address addr, State state) {

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(directory[addr].Owner.count() == 0);
        assert(directory[addr].Sharers.count() == 0);
      } else if (state == State:M) {
        assert(directory[addr].Owner.count() == 1);
        assert(directory[addr].Sharers.count() == 0);
      }

      directory[addr].DirectoryState := state;
    }
  }

  // ** OUT_PORTS **
  out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
  out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaRequestFromDir);

  // added by SS
  out_port(memQueue_out, MemoryMsg, memBuffer);
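
  // memQueue_out sends fetch and writeback requests into the memory
  // controller model; the matching responses arrive on memQueue_in below.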

  // ** IN_PORTS **

  in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
    if (dmaRequestQueue_in.isReady()) {
      peek(dmaRequestQueue_in, DMARequestMsg) {
        if (in_msg.Type == DMARequestType:READ) {
          trigger(Event:DMA_READ, in_msg.LineAddress);
        } else if (in_msg.Type == DMARequestType:WRITE) {
          trigger(Event:DMA_WRITE, in_msg.LineAddress);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  in_port(requestQueue_in, RequestMsg, requestToDir) {
    if (requestQueue_in.isReady()) {
      peek(requestQueue_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:PUTX) {
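          // Only the current owner may retire a PUTX; anyone else triggers
          // PUTX_NotOwner and is NACKed.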
          if (directory[in_msg.Address].Owner.isElement(in_msg.Requestor)) {
            trigger(Event:PUTX, in_msg.Address);
          } else {
            trigger(Event:PUTX_NotOwner, in_msg.Address);
          }
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // added by SS
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DEBUG_EXPR(in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions

  action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
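
  // Same ack as above, but sent when the memory controller acknowledges the
  // writeback: the requestor is recovered from the memory response instead
  // of the request queue.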

  action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_ACK;
        out_msg.Requestor := in_msg.OriginalRequestorMachId;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:WB_NACK;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    directory[address].Owner.clear();
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:DMA,
                                dma_select_low_bit, dma_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
        out_msg.PhysicalAddress := address;
        out_msg.LineAddress := address;
        out_msg.Type := DMAResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:DMA,
                                dma_select_low_bit, dma_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
      out_msg.PhysicalAddress := address;
      out_msg.LineAddress := address;
      out_msg.Type := DMAResponseType:ACK;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:DMA,
                              dma_select_low_bit, dma_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(d_deallocateDirectory, "\d", desc="Deallocate Directory Entry") {
    directory.invalidateBlock(address);
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestQueue_in, RequestMsg) {
      directory[address].Owner.clear();
      directory[address].Owner.add(in_msg.Requestor);
    }
  }

  action(f_forwardRequest, "f", desc="Forward request to owner") {
    peek(requestQueue_in, RequestMsg) {
      APPEND_TRANSITION_COMMENT("Own: ");
      APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
      APPEND_TRANSITION_COMMENT("Req: ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := directory[in_msg.Address].Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := machineID;
        out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    requestQueue_in.dequeue();
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    dmaRequestQueue_in.dequeue();
  }

  action(l_writeDataToMemory, "l", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      // assert(in_msg.Dirty);
      // assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(in_msg.DataBlk);
    }
  }
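
  // copyPartial writes only Len bytes of the TBE data, starting at the
  // offset of PhysicalAddress within the line, so the rest of the block is
  // left untouched.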

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    directory[address].DataBlk.copyPartial(TBEs[address].DataBlk,
        addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
      TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
      TBEs[address].Len := in_msg.Len;
    }
  }

  action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
    peek(requestQueue_in, RequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestQueue_in.recycle();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        //out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := directory[address].DataBlk;
        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue partial off-chip writeback request") {
    peek(dmaRequestQueue_in, DMARequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        //out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk,
            addressOffset(in_msg.PhysicalAddress), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue partial off-chip writeback request from TBE") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(TBEs[address].DataBlk,
            addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
    peek(requestQueue_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency="1") {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DEBUG_EXPR(out_msg);
      }
    }
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
    directory[address].DataBlk := TBEs[address].DataBlk;
  }

  // TRANSITIONS
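
  // Protocol summary: a CPU GETX fetches the line from memory (I -> IM -> M);
  // DMA to an idle line goes straight to memory (ID / ID_W); DMA to an owned
  // line first invalidates the owner (M_DRD / M_DWR); CPU writebacks drain
  // to memory through MI.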

  transition({M_DRD, M_DWR}, GETX) {
    z_recycleRequestQueue;
  }

  transition({IM, MI, ID, ID_W}, {GETX, GETS, DMA_READ, DMA_WRITE, PUTX, PUTX_NotOwner}) {
    z_recycleRequestQueue;
  }
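
  // CPU request path: fetch the line from memory and hand ownership to the
  // requestor once the data comes back.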

  transition(I, GETX, IM) {
    //d_sendData;
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    //e_ownerIsRequestor;
    l_popMemQueue;
  }
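
  // DMA read of an idle line: fetch from memory and forward the full block
  // to the DMA controller.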

  transition(I, DMA_READ, ID) {
    //dr_sendDMAData;
    qf_queueMemoryFetchRequestDMA;
    p_popIncomingDMARequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    //p_popIncomingDMARequestQueue;
    l_popMemQueue;
  }
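
  // DMA write to an idle line: stash the data in a TBE, issue a partial
  // writeback to memory, and ack the DMA controller once memory responds.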

  transition(I, DMA_WRITE, ID_W) {
    v_allocateTBE;
    qw_queueMemoryWBRequest_partial;
    p_popIncomingDMARequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }
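
  // DMA access to a line owned by a cache: invalidate the owner first.  The
  // owner's PUTX data services a DMA read directly; for a DMA write, the
  // buffered DMA data is written through to memory after the PUTX arrives.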

  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DRD, PUTX, I) {
    drp_sendDMAData;
    c_clearOwner;
    a_sendWriteBackAck;
    d_deallocateDirectory;
    i_popIncomingRequestQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    p_popIncomingDMARequestQueue;
  }

  transition(M_DWR, PUTX, M_DWRI) {
    qw_queueMemoryWBRequest_partialTBE;
    c_clearOwner;
    i_popIncomingRequestQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    da_sendDMAAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }
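
  // Requests that hit a Modified line: GETX is forwarded to the owner for a
  // cache-to-cache transfer; a PUTX from the owner drains the dirty data to
  // memory through MI.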

  transition(M, GETX, M) {
    f_forwardRequest;
    e_ownerIsRequestor;
    i_popIncomingRequestQueue;
  }

  transition(M, PUTX, MI) {
    c_clearOwner;
    v_allocateTBEFromRequestNet;
    l_queueMemoryWBRequest;
    i_popIncomingRequestQueue;
  }

  transition(MI, Memory_Ack, I) {
    w_writeDataToMemoryFromTBE;
    l_sendWriteBackAck;
    w_deallocateTBE;
    d_deallocateDirectory;
    l_popMemQueue;
  }

  transition(M, PUTX_NotOwner, M) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

  transition(I, PUTX_NotOwner, I) {
    b_sendWriteBackNack;
    i_popIncomingRequestQueue;
  }

}