Ruby: Correctly set access permissions for directory entries
src/mem/protocol/MESI_CMP_directory-dir.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
 */

// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07


machine(Directory, "MESI_CMP_filter_directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int to_mem_ctrl_latency = 1,
   int directory_latency = 6
{
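  // Requests to the directory arrive on virtual network 0 and responses on
  // virtual network 1; all off-chip memory traffic goes through memBuffer.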

  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer responseToDir, network="From", virtual_network="1", ordered="false", vnet_type="response";

  MessageBuffer requestFromDir, network="To", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false", vnet_type="response";

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";

    M, AccessPermission:Invalid, desc="memory copy may be stale, i.e. other modified copies may exist";
    IM, AccessPermission:Busy, desc="Intermediate State I>M";
    MI, AccessPermission:Busy, desc="Intermediate State M>I";
    M_DRD, AccessPermission:Busy, desc="Intermediate State when there is a dma read";
    M_DRDI, AccessPermission:Busy, desc="Intermediate State of a dma read, after the owner's data arrives, waiting for the memory ack";
    M_DWR, AccessPermission:Busy, desc="Intermediate State when there is a dma write";
    M_DWRI, AccessPermission:Busy, desc="Intermediate State of a dma write, after the owner's data arrives, waiting for the memory ack";
  }
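
  // Transient-state naming: IM/MI track an L2 fetch or writeback in flight to
  // memory, ID/ID_W track a DMA read or write to an otherwise idle block, and
  // the M_DRD*/M_DWR* states track DMA accesses that first invalidate and
  // collect data from the current on-chip owner.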

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    //added by SS for dma
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    CleanReplacement, desc="Clean Replacement in L2 cache";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="...";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }


  // ** OBJECTS **

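  // The TBE table is declared external="yes", so its implementation lives in
  // the Ruby runtime; the template_hack parameter instantiates it for this
  // machine's TBE type.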
  TBETable TBEs, template_hack="<Directory_TBE>";

  void set_tbe(TBE tbe);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
    return static_cast(Entry, directory[addr]);
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Address addr, State state) {

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {

      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      } else if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return Directory_State_to_permission(tbe.TBEState);
    }

    return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
        (type == CoherenceRequestType:GET_INSTR) ||
        (type == CoherenceRequestType:GETX);
  }


  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

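  // Each in_port peeks at the message at the head of its queue and maps the
  // message type to a protocol Event; any unexpected message type is a fatal
  // error.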
  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }



  // Actions
  action(a_sendAck, "a", desc="Send ack to L2") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(aa_sendAck, "aa", desc="Send ack to L2") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  //added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue();
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestNetwork_in.recycle();
  }

  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle();
  }


  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }


  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination := getDirectoryEntry(address).Owner;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }


  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    //getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }


  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        //out_msg.DataBlk := in_msg.DataBlk;
        //out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }


  // TRANSITIONS

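  // The transition table covers three flows: L2 fetches and writebacks
  // (I -> IM -> M and M -> MI -> I), DMA accesses to an idle block
  // (I -> ID/ID_W -> I), and DMA accesses to a modified block, which first
  // invalidate the owner and then write the collected data back to memory
  // (M -> M_DRD/M_DWR -> M_DRDI/M_DWRI -> I).
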
  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    j_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
  }

  //added by SS
  transition(M, CleanReplacement, I) {
    c_clearOwner;
    a_sendAck;
    k_popIncomingResponseQueue;
  }

  transition(M, Data, MI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    c_clearOwner;
    aa_sendAck;
    l_popMemQueue;
  }


  //added by SS for dma support
  transition(I, DMA_READ, ID) {
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    l_popMemQueue;
  }

  transition(I, DMA_WRITE, ID_W) {
    dw_writeDMAData;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    l_popMemQueue;
  }

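  // While a block is in a transient state, newly arriving fetches, writebacks
  // and DMA requests are recycled (put back on their queues) until the block
  // reaches a stable state again.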
  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_recycleRequestQueue;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }


  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    c_clearOwner;
    l_popMemQueue;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    aa_sendAck;
    c_clearOwner;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

}