/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
 */

// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07


machine(Directory, "MESI_CMP_filter_directory protocol")
: DirectoryMemory * directory,
  MemoryControl * memBuffer,
  int to_mem_ctrl_latency = 1,
  int directory_latency = 6
{

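  // Requests and responses travel on separate virtual networks (0 and 1),
  // presumably so that a stalled request cannot block the response that
  // would eventually unblock it.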
  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";
  MessageBuffer responseToDir, network="From", virtual_network="1", ordered="false";

  MessageBuffer requestFromDir, network="To", virtual_network="0", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid in all caches; the directory/memory is the owner and is up-to-date";
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";

    M, desc="Modified";
    IM, desc="Intermediate State I>M";
    MI, desc="Intermediate State M>I";
    M_DRD, desc="Intermediate State: DMA read while in M, waiting for data from the owner";
    M_DRDI, desc="Intermediate State: DMA read, owner data received, waiting for the memory writeback ack";
    M_DWR, desc="Intermediate State: DMA write while in M, waiting for data from the owner";
    M_DWRI, desc="Intermediate State: DMA write, owner data received, waiting for the memory writeback ack";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    //added by SS for dma
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    CleanReplacement, desc="Clean Replacement in L2 cache";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="Length of the DMA transfer in bytes";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }


  // ** OBJECTS **

  TBETable TBEs, template_hack="<Directory_TBE>";

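  // set_tbe/unset_tbe bind the implicit "tbe" variable referenced by the
  // actions below (e.g. tbe.DataBlk); the in_ports pass the matching
  // TBEs[...] entry to trigger(), so it is available to getState/setState
  // and to the actions for the duration of a transition.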
  void set_tbe(TBE tbe);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

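  // The assertions below encode the directory invariant: a block in I has no
  // owner and no sharers, while a block in M has exactly one owner and no
  // sharers.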
  void setState(TBE tbe, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      } else if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
      (type == CoherenceRequestType:GET_INSTR) ||
      (type == CoherenceRequestType:GETX);
  }


  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }


  // Actions
  action(a_sendAck, "a", desc="Send ack to L2") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(aa_sendAck, "aa", desc="Send ack to L2 after the memory writeback completes") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }
  //added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue();
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;  // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

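  // Note: copyPartial copies only in_msg.Len bytes, starting at the request
  // address' offset within the line, so a sub-line DMA write does not clobber
  // the rest of the data block.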
  action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request of a partial line") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestNetwork_in.recycle();
  }

  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle();
  }


  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }


  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination := getDirectoryEntry(address).Owner;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }


  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;  // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    //getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }


  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request of a partial line from TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        //out_msg.DataBlk := in_msg.DataBlk;
        //out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }


  // TRANSITIONS

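  // Summary of the flows implemented below:
  //  * L1/L2 fetch:       I -> IM -> M            (memory read, data to requestor)
  //  * Clean replacement:  M -> I                  (ack the L2, no memory update)
  //  * Dirty writeback:    M -> MI -> I            (write data to memory, then ack)
  //  * DMA read in I:      I -> ID -> I            (memory read, data to DMA controller)
  //  * DMA write in I:     I -> ID_W -> I          (partial memory write, then DMA ack)
  //  * DMA read in M:      M -> M_DRD -> M_DRDI -> I  (invalidate owner first)
  //  * DMA write in M:     M -> M_DWR -> M_DWRI -> I  (invalidate owner first)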
  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    j_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
  }

  //added by SS
  transition(M, CleanReplacement, I) {
    c_clearOwner;
    a_sendAck;
    k_popIncomingResponseQueue;
  }

  transition(M, Data, MI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    c_clearOwner;
    aa_sendAck;
    l_popMemQueue;
  }


  //added by SS for dma support
  transition(I, DMA_READ, ID) {
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    dw_writeDMAData;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    l_popMemQueue;
  }

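  // While a line is in a transient state the directory does not service new
  // requests for it; conflicting requests are recycled (stalled and retried)
  // until the in-flight transaction completes.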
  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_recycleRequestQueue;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }


  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    c_clearOwner;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    aa_sendAck;
    c_clearOwner;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

}