src/mem/protocol/MESI_CMP_directory-dir.sm
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
 */

// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07

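// Note: the controller parameters declared below are set from the Ruby
// Python configuration when the generated Directory_Controller SimObject
// is instantiated.  A rough, hypothetical sketch only (the surrounding
// object names are assumptions, not taken from this file):
//
//   dir_cntrl = Directory_Controller(directory = RubyDirectoryMemory(),
//                                    memBuffer = RubyMemoryControl(),
//                                    to_mem_ctrl_latency = 1,
//                                    directory_latency = 6)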
machine(Directory, "MESI_CMP_filter_directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int to_mem_ctrl_latency = 1,
   int directory_latency = 6
{

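  // Incoming requests use virtual network 0 and incoming responses use
  // virtual network 1; outgoing directory responses are sent on
  // responseFromDir (see responseNetwork_out below).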
  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false";
  MessageBuffer responseToDir, network="From", virtual_network="1", ordered="false";

  MessageBuffer requestFromDir, network="To", virtual_network="0", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";

  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, desc="Invalid; memory is up-to-date and no cache owns the block";
    ID, desc="Intermediate state for DMA_READ when in I";
    ID_W, desc="Intermediate state for DMA_WRITE when in I";

    M, desc="Modified";
    IM, desc="Intermediate State I>M";
    MI, desc="Intermediate State M>I";
    M_DRD, desc="DMA read while a cache owns the block; waiting for the owner's data";
    M_DRDI, desc="DMA read data sent; waiting for the memory writeback ack";
    M_DWR, desc="DMA write while a cache owns the block; waiting for the owner's data";
    M_DWRI, desc="DMA write data sent to memory; waiting for the writeback ack";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    //added by SS for dma
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    CleanReplacement, desc="Clean Replacement in L2 cache";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }


  // ** OBJECTS **

  TBETable TBEs, template_hack="<Directory_TBE>";

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

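  // A block's state is the transient TBE state while a DMA transaction is
  // outstanding for it; otherwise it is the stable state recorded in the
  // directory entry (blocks the directory has never seen default to I).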
  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }


  void setState(Address addr, State state) {

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      } else if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
      (type == CoherenceRequestType:GET_INSTR) ||
      (type == CoherenceRequestType:GETX);
  }


  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address));
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address));
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.Address);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }


  // Actions
  action(a_sendAck, "a", desc="Send ack to L2") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(aa_sendAck, "aa", desc="Send ack to L2 after the memory ack") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  //added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue();
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip partial writeback request (DMA write)") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

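  // Both recycle actions below operate on the same physical queue: DMA
  // requests arrive on the request network alongside CPU fetches, so
  // recycling the "DMA queue" is recycling requestNetwork_in.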
  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestNetwork_in.recycle();
  }

  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle();
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination := getDirectoryEntry(address).Owner;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := in_msg.DataBlk;   // we send the entire data block and rely on the dma controller to split it up if need be
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

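  // For a DMA write that hits a block owned by a cache, the original DMA
  // request (data, address, length) is stashed in a TBE so the partial
  // write can be merged and acknowledged once the owner's data arrives.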
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      TBEs[address].DataBlk := in_msg.DataBlk;
      TBEs[address].PhysicalAddress := in_msg.Address;
      TBEs[address].Len := in_msg.Len;
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    //getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
    getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip partial writeback request from TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        //out_msg.DataBlk := in_msg.DataBlk;
        //out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
        out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }


  // TRANSITIONS

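  // Demand fetch:      I -> IM -> M            (data supplied by memory)
  // Clean replacement: M -> I
  // Dirty writeback:   M -> MI -> I            (data written back to memory)
  // DMA read:          I -> ID -> I,    or M -> M_DRD -> M_DRDI -> I
  // DMA write:         I -> ID_W -> I,  or M -> M_DWR -> M_DWRI -> I
  // Requests that arrive in a transient state are recycled until it resolves.
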
  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    j_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
  }

  //added by SS
  transition(M, CleanReplacement, I) {
    c_clearOwner;
    a_sendAck;
    k_popIncomingResponseQueue;
  }

  transition(M, Data, MI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    c_clearOwner;
    aa_sendAck;
    l_popMemQueue;
  }


  //added by SS for dma support
  transition(I, DMA_READ, ID) {
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    dw_writeDMAData;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    l_popMemQueue;
  }

  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_recycleRequestQueue;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }


  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    c_clearOwner;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    aa_sendAck;
    c_clearOwner;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

}