mem-ruby: Replace SLICC queueMemory calls with enqueue
[gem5.git] / src / learning_gem5 / part3 / MSI-dir.sm
1 /*
2 * Copyright (c) 2017 Jason Lowe-Power
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /**
30 * This file contains the directory controller of a simple example MSI protocol
31 *
32 * In Ruby the directory controller both contains the directory coherence state
33 * but also functions as the memory controller in many ways. There are states
34 * in the directory that are both memory-centric and cache-centric. Be careful!
35 *
36 * The protocol in this file is based off of the MSI protocol found in
37 * A Primer on Memory Consistency and Cache Coherence
38 * Daniel J. Sorin, Mark D. Hill, and David A. Wood
39 * Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
40 *
41 * Table 8.2 contains the transitions and actions found in this file and
42 * section 8.2.4 explains the protocol in detail.
43 *
44 * See Learning gem5 Part 3: Ruby for more details.
45 */
46
47 machine(MachineType:Directory, "Directory protocol")
48 :
49 // This "DirectoryMemory" is a little weird. It is initially allocated
50 // so that it *can* cover all of memory (i.e., there are pointers for
51 // every 64-byte block in memory). However, the entries are lazily
52 // created in getDirEntry()
53 DirectoryMemory * directory;
54 // You can put any parameters you want here. They will be exported as
55 // normal SimObject parameters (like in the SimObject description file)
56 // and you can set these parameters at runtime via the python config
57 // file. If there is no default here (like directory), it is mandatory
58 // to set the parameter in the python config. Otherwise, it uses the
59 // default value set here.
60 Cycles toMemLatency := 1;
61
62 // Forwarding requests from the directory *to* the caches.
63 MessageBuffer *forwardToCache, network="To", virtual_network="1",
64 vnet_type="forward";
65 // Response from the directory *to* the cache.
66 MessageBuffer *responseToCache, network="To", virtual_network="2",
67 vnet_type="response";
68
69 // Requests *from* the cache to the directory
70 MessageBuffer *requestFromCache, network="From", virtual_network="0",
71 vnet_type="request";
72
73 // Responses *from* the cache to the directory
74 MessageBuffer *responseFromCache, network="From", virtual_network="2",
75 vnet_type="response";
76
77 // Special buffer for memory requests. Kind of like the mandatory queue
78 MessageBuffer *requestToMemory;
79
80 // Special buffer for memory responses. Kind of like the mandatory queue
81 MessageBuffer *responseFromMemory;
82
83 {
84 // For many things in SLICC you can specify a default. However, this
85 // default must use the C++ name (mangled SLICC name). For the state below
86 // you have to use the controller name and the name we use for states.
87 state_declaration(State, desc="Directory states",
88 default="Directory_State_I") {
89 // Stable states.
90 // NOTE: These are "cache-centric" states like in Sorin et al.
91 // However, the access permissions are memory-centric.
92 I, AccessPermission:Read_Write, desc="Invalid in the caches.";
93 S, AccessPermission:Read_Only, desc="At least one cache has the blk";
94 M, AccessPermission:Invalid, desc="A cache has the block in M";
95
96 // Transient states
97 S_D, AccessPermission:Busy, desc="Moving to S, but need data";
98
99 // Waiting for data from memory
100 S_m, AccessPermission:Read_Write, desc="In S waiting for mem";
101 M_m, AccessPermission:Read_Write, desc="Moving to M waiting for mem";
102
103 // Waiting for write-ack from memory
104 MI_m, AccessPermission:Busy, desc="Moving to I waiting for ack";
105 SS_m, AccessPermission:Busy, desc="Moving to S waiting for ack";
106 }
107
108 enumeration(Event, desc="Directory events") {
109 // Data requests from the cache
110 GetS, desc="Request for read-only data from cache";
111 GetM, desc="Request for read-write data from cache";
112
113 // Writeback requests from the cache
// NOTE: the Last/NotLast and Owner/NonOwner distinction is made in the
// request in_port below by inspecting the entry's Sharers/Owner sets;
// the cache sends a plain PutS/PutM.
114 PutSNotLast, desc="PutS and the block has other sharers";
115 PutSLast, desc="PutS and the block has no other sharers";
116 PutMOwner, desc="Dirty data writeback from the owner";
117 PutMNonOwner, desc="Dirty data writeback from non-owner";
118
119 // Cache responses
120 Data, desc="Response to fwd request with data";
121
122 // From Memory
123 MemData, desc="Data from memory";
124 MemAck, desc="Ack from memory that write is complete";
125 }
126
127 // NOTE: We use a netdest for the sharers and the owner so we can simply
128 // copy the structure into the message we send as a response.
129 structure(Entry, desc="...", interface="AbstractCacheEntry", main="false") {
130 State DirState, desc="Directory state";
131 NetDest Sharers, desc="Sharers for this block";
// Owner is a NetDest only so it can be copied into message destinations;
// setState() asserts it holds exactly one machine whenever the state is M.
132 NetDest Owner, desc="Owner of this block";
133 }
134
// Prototype for the controller clock; used by the in_ports and dequeue
// calls below (implementation comes from the generated controller code).
135 Tick clockEdge();
136
137 // This either returns the valid directory entry, or, if it hasn't been
138 // allocated yet, this allocates the entry. This may save some host memory
139 // since this is lazily populated.
140 Entry getDirectoryEntry(Addr addr), return_by_pointer = "yes" {
141 Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
142 if (is_invalid(dir_entry)) {
143 // This first time we see this address allocate an entry for it.
144 dir_entry := static_cast(Entry, "pointer",
145 directory.allocate(addr, new Entry));
146 }
147 return dir_entry;
148 }
149
150 /*************************************************************************/
151 // Functions that we need to define/override to use our specific structures
152 // in this implementation.
153 // NOTE: we don't have TBE in this machine, so we don't need to pass it
154 // to these overridden functions.
155
// Returns the directory state for addr; blocks never seen before default
// to I (Entry.DirState is only meaningful once the entry exists).
156 State getState(Addr addr) {
157 if (directory.isPresent(addr)) {
158 return getDirectoryEntry(addr).DirState;
159 } else {
160 return State:I;
161 }
162 }
163
// Updates the directory state and sanity-checks the sharer/owner
// invariants: M requires exactly one owner and no sharers; I requires
// neither owners nor sharers.
164 void setState(Addr addr, State state) {
165 if (directory.isPresent(addr)) {
166 if (state == State:M) {
167 DPRINTF(RubySlicc, "Owner %s\n", getDirectoryEntry(addr).Owner);
168 assert(getDirectoryEntry(addr).Owner.count() == 1);
169 assert(getDirectoryEntry(addr).Sharers.count() == 0);
170 }
171 getDirectoryEntry(addr).DirState := state;
// Check *after* the state update so the entry reflects the new state.
172 if (state == State:I) {
173 assert(getDirectoryEntry(addr).Owner.count() == 0);
174 assert(getDirectoryEntry(addr).Sharers.count() == 0);
175 }
176 }
177 }
178
179 // This is really the access permissions of memory.
180 // TODO: I don't understand this at the directory.
181 AccessPermission getAccessPermission(Addr addr) {
182 if (directory.isPresent(addr)) {
183 Entry e := getDirectoryEntry(addr);
184 return Directory_State_to_permission(e.DirState);
185 } else {
186 return AccessPermission:NotPresent;
187 }
188 }
// Mirror of getAccessPermission: stamps the permission derived from
// 'state' onto the entry (no-op for never-seen addresses).
189 void setAccessPermission(Addr addr, State state) {
190 if (directory.isPresent(addr)) {
191 Entry e := getDirectoryEntry(addr);
192 e.changePermission(Directory_State_to_permission(state));
193 }
194 }
195
// Functional (debug/backdoor) accesses go straight to memory; the
// directory holds no data of its own.
196 void functionalRead(Addr addr, Packet *pkt) {
197 functionalMemoryRead(pkt);
198 }
199
200 // This returns the number of writes. So, if we write then return 1
201 int functionalWrite(Addr addr, Packet *pkt) {
202 if (functionalMemoryWrite(pkt)) {
203 return 1;
204 } else {
205 return 0;
206 }
207 }
208
209
210 /*************************************************************************/
211 // Network ports
212
// Out ports: forwards/invalidates to caches, data/ack responses to
// caches, and read/writeback requests to the memory controller.
213 out_port(forward_out, RequestMsg, forwardToCache);
214 out_port(response_out, ResponseMsg, responseToCache);
215 out_port(memQueue_out, MemoryMsg, requestToMemory);
216
// Responses from memory: reads complete with MemData, writebacks with
// MemAck. In_ports are checked in the order written, so memory responses
// take priority over cache responses and requests.
217 in_port(memQueue_in, MemoryMsg, responseFromMemory) {
218 if (memQueue_in.isReady(clockEdge())) {
219 peek(memQueue_in, MemoryMsg) {
220 if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
221 trigger(Event:MemData, in_msg.addr);
222 } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
223 trigger(Event:MemAck, in_msg.addr);
224 } else {
225 error("Invalid message");
226 }
227 }
228 }
229 }
230
// Responses from caches. The only response the directory expects is the
// Data sent by a downgrading owner (consumed in state S_D).
231 in_port(response_in, ResponseMsg, responseFromCache) {
232 if (response_in.isReady(clockEdge())) {
233 peek(response_in, ResponseMsg) {
234 if (in_msg.Type == CoherenceResponseType:Data) {
235 trigger(Event:Data, in_msg.addr);
236 } else {
237 error("Unexpected message type.");
238 }
239 }
240 }
241 }
242
// Requests from caches. PutS/PutM are refined into the four Put* events
// here by inspecting the directory entry's Sharers/Owner sets.
243 in_port(request_in, RequestMsg, requestFromCache) {
244 if (request_in.isReady(clockEdge())) {
245 peek(request_in, RequestMsg) {
// getDirectoryEntry() allocates on first use, so 'entry' is always a
// valid pointer here.
246 Entry entry := getDirectoryEntry(in_msg.addr);
247 if (in_msg.Type == CoherenceRequestType:GetS) {
248 // NOTE: Since we don't have a TBE in this machine, there
249 // is no need to pass a TBE into trigger. Also, for the
250 // directory there is no cache entry.
251 trigger(Event:GetS, in_msg.addr);
252 } else if (in_msg.Type == CoherenceRequestType:GetM) {
253 trigger(Event:GetM, in_msg.addr);
254 } else if (in_msg.Type == CoherenceRequestType:PutS) {
255 assert(is_valid(entry));
256 // If there is only a single sharer (i.e., the requestor)
257 if (entry.Sharers.count() == 1) {
258 assert(entry.Sharers.isElement(in_msg.Requestor));
259 trigger(Event:PutSLast, in_msg.addr);
260 } else {
261 trigger(Event:PutSNotLast, in_msg.addr);
262 }
263 } else if (in_msg.Type == CoherenceRequestType:PutM) {
264 assert(is_valid(entry));
// A PutM from a cache that is no longer the owner is a stale writeback
// (it raced with a forwarded request) and is handled separately.
265 if (entry.Owner.isElement(in_msg.Requestor)) {
266 trigger(Event:PutMOwner, in_msg.addr);
267 } else {
268 trigger(Event:PutMNonOwner, in_msg.addr);
269 }
270 } else {
271 error("Unexpected message type.");
272 }
273 }
274 }
275 }
276
277
278
279 /*************************************************************************/
280 // Actions
281
282 // Memory actions.
283
284 action(sendMemRead, "r", desc="Send a memory read request") {
285 peek(request_in, RequestMsg) {
286 // Send request through special memory request queue. At some
287 // point the response will be on the memory response queue.
288 // Like enqueue, this takes a latency for the request.
289 enqueue(memQueue_out, MemoryMsg, toMemLatency) {
290 out_msg.addr := address;
291 out_msg.Type := MemoryRequestType:MEMORY_READ;
// Sender is echoed back as OriginalRequestorMachId in the memory
// response, which sendDataToReq uses as the destination.
292 out_msg.Sender := in_msg.Requestor;
293 out_msg.MessageSize := MessageSizeType:Request_Control;
294 out_msg.Len := 0;
295 }
296 }
297 }
298
// Writeback of dirty data carried on a *request* (PutM from the owner).
299 action(sendDataToMem, "w", desc="Write data to memory") {
300 peek(request_in, RequestMsg) {
301 DPRINTF(RubySlicc, "Writing memory for %#x\n", address);
302 DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk);
303 enqueue(memQueue_out, MemoryMsg, toMemLatency) {
304 out_msg.addr := address;
305 out_msg.Type := MemoryRequestType:MEMORY_WB;
306 out_msg.Sender := in_msg.Requestor;
307 out_msg.MessageSize := MessageSizeType:Writeback_Data;
308 out_msg.DataBlk := in_msg.DataBlk;
309 out_msg.Len := 0;
310 }
311 }
312 }
313
// Same writeback, but for data carried on a *response* (the Data sent by
// a downgrading owner, consumed in S_D). Peeks response_in, not request_in.
314 action(sendRespDataToMem, "rw", desc="Write data to memory from resp") {
315 peek(response_in, ResponseMsg) {
316 DPRINTF(RubySlicc, "Writing memory for %#x\n", address);
317 DPRINTF(RubySlicc, "Writing %s\n", in_msg.DataBlk);
318 enqueue(memQueue_out, MemoryMsg, toMemLatency) {
319 out_msg.addr := address;
320 out_msg.Type := MemoryRequestType:MEMORY_WB;
321 out_msg.Sender := in_msg.Sender;
322 out_msg.MessageSize := MessageSizeType:Writeback_Data;
323 out_msg.DataBlk := in_msg.DataBlk;
324 out_msg.Len := 0;
325 }
326 }
327 }
328
329 // Sharer/owner actions
330
331 action(addReqToSharers, "aS", desc="Add requestor to sharer list") {
332 peek(request_in, RequestMsg) {
333 getDirectoryEntry(address).Sharers.add(in_msg.Requestor);
334 }
335 }
336
// NOTE: 'add' on the Owner NetDest; callers must clearOwner first if an
// owner is already recorded (see the M, GetM transition).
337 action(setOwner, "sO", desc="Set the owner") {
338 peek(request_in, RequestMsg) {
339 getDirectoryEntry(address).Owner.add(in_msg.Requestor);
340 }
341 }
342
// Used on an M->S downgrade: the old owner keeps a shared copy.
343 action(addOwnerToSharers, "oS", desc="Add the owner to sharers") {
344 Entry e := getDirectoryEntry(address);
345 assert(e.Owner.count() == 1);
346 e.Sharers.addNetDest(e.Owner);
347 }
348
349 action(removeReqFromSharers, "rS", desc="Remove requestor from sharers") {
350 peek(request_in, RequestMsg) {
351 getDirectoryEntry(address).Sharers.remove(in_msg.Requestor);
352 }
353 }
354
355 action(clearSharers, "cS", desc="Clear the sharer list") {
356 getDirectoryEntry(address).Sharers.clear();
357 }
358
359 action(clearOwner, "cO", desc="Clear the owner") {
360 getDirectoryEntry(address).Owner.clear();
361 }
362
363 // Invalidates and forwards
364
// Invalidate every current sharer. NOTE: the requestor has already been
// removed from Sharers by removeReqFromSharers in the S, GetM transition,
// so it does not invalidate itself.
365 action(sendInvToSharers, "i", desc="Send invalidate to all sharers") {
366 peek(request_in, RequestMsg) {
367 enqueue(forward_out, RequestMsg, 1) {
368 out_msg.addr := address;
369 out_msg.Type := CoherenceRequestType:Inv;
// Requestor is the original requesting cache, not the directory.
370 out_msg.Requestor := in_msg.Requestor;
371 out_msg.Destination := getDirectoryEntry(address).Sharers;
372 out_msg.MessageSize := MessageSizeType:Control;
373 }
374 }
375 }
376
// Forward a GetS to the current owner, carrying the original requestor
// so the owner's data response can bypass the directory.
377 action(sendFwdGetS, "fS", desc="Send forward getS to owner") {
378 assert(getDirectoryEntry(address).Owner.count() == 1);
379 peek(request_in, RequestMsg) {
380 enqueue(forward_out, RequestMsg, 1) {
381 out_msg.addr := address;
382 out_msg.Type := CoherenceRequestType:GetS;
383 out_msg.Requestor := in_msg.Requestor;
384 out_msg.Destination := getDirectoryEntry(address).Owner;
385 out_msg.MessageSize := MessageSizeType:Control;
386 }
387 }
388 }
389
// Same as sendFwdGetS but for ownership transfer (M -> M between caches).
390 action(sendFwdGetM, "fM", desc="Send forward getM to owner") {
391 assert(getDirectoryEntry(address).Owner.count() == 1);
392 peek(request_in, RequestMsg) {
393 enqueue(forward_out, RequestMsg, 1) {
394 out_msg.addr := address;
395 out_msg.Type := CoherenceRequestType:GetM;
396 out_msg.Requestor := in_msg.Requestor;
397 out_msg.Destination := getDirectoryEntry(address).Owner;
398 out_msg.MessageSize := MessageSizeType:Control;
399 }
400 }
401 }
402
403 // Responses to requests
404
405 // This also needs to send along the number of sharers!!!!
405 // This also needs to send along the number of sharers!!!!
406 action(sendDataToReq, "d", desc="Send data from memory to requestor. ") {
407 //"May need to send sharer number, too") {
408 peek(memQueue_in, MemoryMsg) {
409 enqueue(response_out, ResponseMsg, 1) {
410 out_msg.addr := address;
411 out_msg.Type := CoherenceResponseType:Data;
412 out_msg.Sender := machineID;
413 out_msg.Destination.add(in_msg.OriginalRequestorMachId);
414 out_msg.DataBlk := in_msg.DataBlk;
415 out_msg.MessageSize := MessageSizeType:Data;
416 Entry e := getDirectoryEntry(address);
// Acks are only needed when the requestor was recorded as the owner,
// i.e. this is a GetM response (setOwner ran before sendMemRead); the
// requestor must collect one Inv-Ack per remaining sharer.
417 // Only need to include acks if we are the owner.
418 if (e.Owner.isElement(in_msg.OriginalRequestorMachId)) {
419 out_msg.Acks := e.Sharers.count();
420 } else {
421 out_msg.Acks := 0;
422 }
423 assert(out_msg.Acks >= 0);
424 }
425 }
426 }
427
// Acknowledge a PutS/PutM. Sent on the forward network (not the response
// network) as a RequestMsg with Type PutAck.
428 action(sendPutAck, "a", desc="Send the put ack") {
429 peek(request_in, RequestMsg) {
430 enqueue(forward_out, RequestMsg, 1) {
431 out_msg.addr := address;
432 out_msg.Type := CoherenceRequestType:PutAck;
433 out_msg.Requestor := machineID;
434 out_msg.Destination.add(in_msg.Requestor);
435 out_msg.MessageSize := MessageSizeType:Control;
436 }
437 }
438 }
439
440 // Queue management
441
442 action(popResponseQueue, "pR", desc="Pop the response queue") {
443 response_in.dequeue(clockEdge());
444 }
445
446 action(popRequestQueue, "pQ", desc="Pop the request queue") {
447 request_in.dequeue(clockEdge());
448 }
449
450 action(popMemQueue, "pM", desc="Pop the memory queue") {
451 memQueue_in.dequeue(clockEdge());
452 }
453
454 // Stalling actions
// Not dequeuing leaves the message at the head of its buffer, so it will
// be re-examined later (after a state change unblocks it).
455 action(stall, "z", desc="Stall the incoming request") {
456 // Do nothing.
457 }
458
459
460 /*************************************************************************/
461 // transitions
462
// Read miss: fetch the block from memory and record the new sharer.
463 transition({I, S}, GetS, S_m) {
464 sendMemRead;
465 addReqToSharers;
466 popRequestQueue;
467 }
468
// Stale Put* for a block the directory already considers invalid: just ack.
469 transition(I, {PutSNotLast, PutSLast, PutMNonOwner}) {
470 sendPutAck;
471 popRequestQueue;
472 }
473
474 transition(S_m, MemData, S) {
475 sendDataToReq;
476 popMemQueue;
477 }
478
// Write miss from I: fetch from memory, requestor becomes owner.
479 transition(I, GetM, M_m) {
480 sendMemRead;
481 setOwner;
482 popRequestQueue;
483 }
484
485 transition(M_m, MemData, M) {
486 sendDataToReq;
487 clearSharers; // NOTE: This isn't *required* in some cases.
488 popMemQueue;
489 }
490
// Upgrade S -> M: requestor leaves the sharer set, everyone else gets an
// Inv (sendDataToReq will count the remaining sharers as Acks).
491 transition(S, GetM, M_m) {
492 sendMemRead;
493 removeReqFromSharers;
494 sendInvToSharers;
495 setOwner;
496 popRequestQueue;
497 }
498
// A non-last PutS (or stale PutM) while the block is shared or in a
// shared-side transient: drop the sharer and ack.
499 transition({S, S_D, SS_m, S_m}, {PutSNotLast, PutMNonOwner}) {
500 removeReqFromSharers;
501 sendPutAck;
502 popRequestQueue;
503 }
504
// Last sharer leaves: block returns to I.
505 transition(S, PutSLast, I) {
506 removeReqFromSharers;
507 sendPutAck;
508 popRequestQueue;
509 }
510
// Read request while a cache owns the block: forward to the owner, which
// downgrades and sends Data; the directory waits for that data in S_D.
511 transition(M, GetS, S_D) {
512 sendFwdGetS;
513 addReqToSharers;
514 addOwnerToSharers;
515 clearOwner;
516 popRequestQueue;
517 }
518
// Ownership transfer between caches: memory is never involved, so the
// state stays M (owner changes hands via the forwarded GetM).
519 transition(M, GetM) {
520 sendFwdGetM;
521 clearOwner;
522 setOwner;
523 popRequestQueue;
524 }
525
// Stale Put* racing with an ownership change: ack without touching state.
526 transition({M, M_m, MI_m}, {PutSNotLast, PutSLast, PutMNonOwner}) {
527 sendPutAck;
528 popRequestQueue;
529 }
530
// Owner writes back dirty data; wait in MI_m for the memory ack.
531 transition(M, PutMOwner, MI_m) {
532 sendDataToMem;
533 clearOwner;
534 sendPutAck;
535 popRequestQueue;
536 }
537
538 transition(MI_m, MemAck, I) {
539 popMemQueue;
540 }
541
// S_D is waiting for the old owner's Data; new requests must wait.
542 transition(S_D, {GetS, GetM}) {
543 stall;
544 }
545
546 transition(S_D, PutSLast) {
547 removeReqFromSharers;
548 sendPutAck;
549 popRequestQueue;
550 }
551
// Old owner's Data arrived: write it back, wait for the memory ack.
552 transition(S_D, Data, SS_m) {
553 sendRespDataToMem;
554 popResponseQueue;
555 }
556
557 transition(SS_m, MemAck, S) {
558 popMemQueue;
559 }
560
561 // If we get another request for a block that's waiting on memory,
562 // stall that request.
563 transition({MI_m, SS_m, S_m, M_m}, {GetS, GetM}) {
564 stall;
565 }
566
567 }