ruby: Make ruby #includes use full paths to the files they're including.
[gem5.git] / src / mem / protocol / MOESI_SMP_token-dir.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id: MOESI_token-dir.sm 1.5 04/11/17 14:07:50-06:00 mikem@emperor15.cs.wisc.edu $
32 */
33
34 machine(Directory, "Token protocol") {
35
36 MessageBuffer responseFromDir, network="To", virtual_network="0", ordered="false";
37
38 MessageBuffer responseToDir, network="From", virtual_network="0", ordered="false";
39 MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
40 MessageBuffer persistentToDir, network="From", virtual_network="2", ordered="true";
41
42
43 // STATES
44 enumeration(State, desc="Directory states", default="Directory_State_O") {
45 // Base states
46 O, desc="Owner";
47 NO, desc="Not Owner";
48 L, desc="Locked";
49 }
50
51 // Events
52 enumeration(Event, desc="Directory events") {
53 GETX, desc="A GETX arrives";
54 GETS, desc="A GETS arrives";
55 Lockdown, desc="A lockdown request arrives";
56 Unlockdown, desc="An un-lockdown request arrives";
57 Data_Owner, desc="Data arrive, includes the owner token";
58 Data_Shared, desc="Data arrive, does not include the owner token";
59 Ack, desc="Tokens arrive";
60 Ack_Owner, desc="Tokens arrive, including the owner token";
61 }
62
63 // TYPES
64
65 // DirectoryEntry
66 structure(Entry, desc="...") {
67 State DirectoryState, desc="Directory state";
68 DataBlock DataBlk, desc="data for the block";
69 int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";
70 }
71
72 external_type(DirectoryMemory) {
73 Entry lookup(Address);
74 bool isPresent(Address);
75 }
76
77 // ** OBJECTS **
78
79 DirectoryMemory directory, constructor_hack="i";
80
81 PersistentTable persistentTable, constructor_hack="i";
82
// Return the directory's current coherence state for this address.
83   State getState(Address addr) {
84     return directory[addr].DirectoryState;
85   }
86
// Update the directory state for addr and sanity-check the token
// invariants implied by the new state.
87   void setState(Address addr, State state) {
88     directory[addr].DirectoryState := state;
89
// Locked (L): everything has been drained to the starver, so the
// directory must hold no tokens.
90     if (state == State:L) {
91       assert(directory[addr].Tokens == 0);
92     }
93
94     // Make sure the token count is in range
95     assert(directory[addr].Tokens >= 0);
96     assert(directory[addr].Tokens <= max_tokens());
97
// Owner (O): the directory must hold the owner token, and normally a
// majority of tokens (second assert is acknowledged as approximate).
98     if (state == State:O) {
99       assert(directory[addr].Tokens >= 1); // Must have at least one token
100       assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
101     }
102   }
103
104 // ** OUT_PORTS **
105 out_port(responseNetwork_out, ResponseMsg, responseFromDir);
106
107 // ** IN_PORTS **
108
// Persistent (starvation-avoidance) request port. Each message first
// updates the persistent table (lock for GETX/GETS_PERSISTENT, unlock
// for DEACTIVATE), then triggers Lockdown or Unlockdown depending on
// whether the table still holds any active lock for the address.
109   in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
110     if (persistentNetwork_in.isReady()) {
111       peek(persistentNetwork_in, PersistentMsg) {
112
113         // Apply the lockdown or unlockdown message to the table
114         if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
115           persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
116         } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
117           persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
118         } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
119           persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
120         } else {
121           error("Invalid message");
122         }
123
124         // React to the message based on the current state of the table
125         if (persistentTable.isLocked(in_msg.Address)) {
126           trigger(Event:Lockdown, in_msg.Address); // locked
127         } else {
128           trigger(Event:Unlockdown, in_msg.Address); // unlocked
129         }
130       }
131     }
132   }
133
// Transient (non-persistent) coherence request port: maps incoming
// GETS/GETX requests from the caches onto directory events.
134   in_port(requestNetwork_in, RequestMsg, requestToDir) {
135     if (requestNetwork_in.isReady()) {
136       peek(requestNetwork_in, RequestMsg) {
137         if (in_msg.Type == CoherenceRequestType:GETS) {
138           trigger(Event:GETS, in_msg.Address);
139         } else if (in_msg.Type == CoherenceRequestType:GETX) {
140           trigger(Event:GETX, in_msg.Address);
141         } else {
142           error("Invalid message");
143         }
144       }
145     }
146   }
147
// Response port: maps token/data responses arriving at the directory
// onto events. DATA_* carry data; ACK_* are token-only (ACK_OWNER also
// carries the owner token, see n_checkIncomingMsg).
148   in_port(responseNetwork_in, ResponseMsg, responseToDir) {
149     if (responseNetwork_in.isReady()) {
150       peek(responseNetwork_in, ResponseMsg) {
151         if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
152           trigger(Event:Data_Owner, in_msg.Address);
153         } else if (in_msg.Type == CoherenceResponseType:ACK) {
154           trigger(Event:Ack, in_msg.Address);
155         } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
156           trigger(Event:Data_Shared, in_msg.Address);
157         } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
158           trigger(Event:Ack_Owner, in_msg.Address);
159         } else {
160           error("Invalid message");
161         }
162       }
163     }
164   }
165
166
167 // Actions
168
// Send every token the directory holds (as a token-only ACK) to the
// requestor of the message at the head of the request queue, then
// zero our count. No message is sent if we hold no tokens.
169   action(a_sendTokens, "a", desc="Send tokens to requestor") {
170     // Only send a message if we have tokens to send
171     if (directory[address].Tokens > 0) {
172       peek(requestNetwork_in, RequestMsg) {
173         enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
174           out_msg.Address := address;
175           out_msg.Type := CoherenceResponseType:ACK;
176           out_msg.Sender := machineID;
177           out_msg.SenderMachine := MachineType:Directory;
178           out_msg.Destination.add(in_msg.Requestor);
179           out_msg.DestMachine := MachineType:L1Cache;
          // Index with 'address' for consistency with the guard above and
          // the zeroing below (it equals in_msg.Address here, because the
          // event was triggered on in_msg.Address).
180           out_msg.Tokens := directory[address].Tokens;
181           out_msg.MessageSize := MessageSizeType:Response_Control;
182         }
183       }
184       directory[address].Tokens := 0;
185     }
186   }
187
// Send every token the directory holds to the starving processor
// selected by persistentTable.findSmallest, then zero our count.
// No message is sent if we hold no tokens.
188   action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
189     // Only send a message if we have tokens to send
190     if (directory[address].Tokens > 0) {
191       enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {
192         out_msg.Address := address;
193         out_msg.Type := CoherenceResponseType:ACK;
194         out_msg.Sender := machineID;
195         out_msg.SenderMachine := MachineType:Directory;
196         out_msg.Destination.add(persistentTable.findSmallest(address));
197         out_msg.DestMachine := MachineType:L1Cache;
198         out_msg.Tokens := directory[address].Tokens;
199         out_msg.MessageSize := MessageSizeType:Response_Control;
200       }
201       directory[address].Tokens := 0;
202     }
203   }
204
205
// Send the memory data block plus every token the directory holds
// (including the owner token, hence DATA_OWNER) to the requestor at
// the head of the request queue, then zero our count.
206   action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
207     peek(requestNetwork_in, RequestMsg) {
208       enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
209         out_msg.Address := address;
210         out_msg.Type := CoherenceResponseType:DATA_OWNER;
211         out_msg.Sender := machineID;
212         out_msg.SenderMachine := MachineType:Directory;
213         out_msg.Destination.add(in_msg.Requestor);
214         out_msg.DestMachine := MachineType:L1Cache;
215         assert(directory[address].Tokens > 0);
        // Index with 'address' for consistency with the assert above and
        // the zeroing below (it equals in_msg.Address here, because the
        // event was triggered on in_msg.Address).
216         out_msg.Tokens := directory[address].Tokens;
217         out_msg.DataBlk := directory[address].DataBlk;
218         out_msg.Dirty := false;
219         out_msg.MessageSize := MessageSizeType:Response_Data;
220       }
221     }
222     directory[address].Tokens := 0;
223   }
224
// Send the memory data block plus every token the directory holds
// (owner token included) to the starver selected by
// persistentTable.findSmallest, then zero our count.
225   action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
226     enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
227       out_msg.Address := address;
228       out_msg.Type := CoherenceResponseType:DATA_OWNER;
229       out_msg.Sender := machineID;
230       out_msg.SenderMachine := MachineType:Directory;
231       out_msg.Destination.add(persistentTable.findSmallest(address));
232       out_msg.DestMachine := MachineType:L1Cache;
233       assert(directory[address].Tokens > 0);
234       out_msg.Tokens := directory[address].Tokens;
235       out_msg.DataBlk := directory[address].DataBlk;
236       out_msg.Dirty := false;
237       out_msg.MessageSize := MessageSizeType:Response_Data;
238     }
239     directory[address].Tokens := 0;
240   }
241
// Add the tokens carried by the incoming response to the directory's
// token count for this address.
242   action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
243     peek(responseNetwork_in, ResponseMsg) {
244       assert(in_msg.Tokens >= 1); // token-carrying responses carry at least one token
245       directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
246     }
247   }
248
249
// Dequeue actions: each removes the already-handled message at the
// head of the corresponding incoming network queue.
250   action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
251     requestNetwork_in.dequeue();
252   }
253
254   action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
255     responseNetwork_in.dequeue();
256   }
257
258   action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
259     persistentNetwork_in.dequeue();
260   }
261
// Copy the data block from the incoming response into the directory's
// memory image for that address.
262   action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
263     peek(responseNetwork_in, ResponseMsg) {
264       directory[in_msg.Address].DataBlk := in_msg.DataBlk;
265       DEBUG_EXPR(in_msg.Address);
266       DEBUG_EXPR(in_msg.DataBlk);
267     }
268   }
269
// Sanity-check a clean owner-token return: it must be a clean,
// control-sized ACK_OWNER whose data payload matches memory.
270   action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
271     peek(responseNetwork_in, ResponseMsg) {
272       assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
273       assert(in_msg.Dirty == false);
274       assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
      // The data is carried only to enable this consistency check; see
      // the note in s_bounceDatalessOwnerToken.
275       assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
276     }
277   }
278
// While locked, the directory keeps no tokens or data: forward the
// incoming response unchanged (type, tokens, data, dirty bit, size)
// to the starver selected by persistentTable.findSmallest.
279   action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
280     peek(responseNetwork_in, ResponseMsg) {
281       enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
282         out_msg.Address := address;
283         out_msg.Type := in_msg.Type;
284         out_msg.Sender := machineID;
285         out_msg.SenderMachine := MachineType:Directory;
286         out_msg.Destination.add(persistentTable.findSmallest(address));
287         out_msg.DestMachine := MachineType:L1Cache;
288         out_msg.Tokens := in_msg.Tokens;
289         out_msg.DataBlk := in_msg.DataBlk;
290         out_msg.Dirty := in_msg.Dirty;
291         out_msg.MessageSize := in_msg.MessageSize;
292       }
293     }
294   }
295
// While locked, convert a clean owner-token return (ACK_OWNER) into a
// full DATA_OWNER response for the starver, re-attaching memory's copy
// of the data; the token count is forwarded unchanged.
296   action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
297     peek(responseNetwork_in, ResponseMsg) {
298       assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
299       assert(in_msg.Dirty == false);
300       assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
301
302       // NOTE: The following check would not be valid in a real
303       // implementation.  We include the data in the "dataless"
304       // message so we can assert the clean data matches the datablock
305       // in memory
306       assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
307
308       // Bounce the message, but "re-associate" the data and the owner
309       // token.  In essence we're converting an ACK_OWNER message to a
310       // DATA_OWNER message, keeping the number of tokens the same.
311       enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
312         out_msg.Address := address;
313         out_msg.Type := CoherenceResponseType:DATA_OWNER;
314         out_msg.Sender := machineID;
315         out_msg.SenderMachine := MachineType:Directory;
316         out_msg.Destination.add(persistentTable.findSmallest(address));
317         out_msg.DestMachine := MachineType:L1Cache;
318         out_msg.Tokens := in_msg.Tokens;
319         out_msg.DataBlk := directory[in_msg.Address].DataBlk;
320         out_msg.Dirty := in_msg.Dirty;
321         out_msg.MessageSize := MessageSizeType:Response_Data;
322       }
323     }
324   }
325
326 // TRANSITIONS
327
328   // Trans. from O
// Any transient request takes the data and all of our tokens, so the
// directory stops being owner.
329   transition(O, GETX, NO) {
330     d_sendDataWithAllTokens;
331     j_popIncomingRequestQueue;
332   }
333
334   transition(O, GETS, NO) {
335     d_sendDataWithAllTokens;
336     // Since we found the owner, no need to forward
337     j_popIncomingRequestQueue;
338   }
339
// A persistent request drains data plus all tokens to the starver and
// locks the directory.
340   transition(O, Lockdown, L) {
341     dd_sendDataWithAllTokensToStarver;
342     l_popIncomingPersistentQueue;
343   }
344
// Collect returning non-owner tokens while remaining owner.
345   transition(O, {Data_Shared, Ack}) {
346     f_incrementTokens;
347     k_popIncomingResponseQueue;
348   }
349
350   // Trans. from NO
// Hand over whatever (non-owner) tokens we happen to hold; a_sendTokens
// is a no-op when we hold none.
351   transition(NO, GETX) {
352     a_sendTokens;
353     j_popIncomingRequestQueue;
354   }
355
// Not the owner, so a GETS gets nothing from us.
356   transition(NO, GETS) {
357     j_popIncomingRequestQueue;
358   }
359
360   transition(NO, Lockdown, L) {
361     aa_sendTokensToStarver;
362     l_popIncomingPersistentQueue;
363   }
364
// The owner token (with data) returns: write the data back to memory
// and become owner again.
365   transition(NO, Data_Owner, O) {
366     m_writeDataToMemory;
367     f_incrementTokens;
368     k_popIncomingResponseQueue;
369   }
370
// The owner token returns clean (dataless ACK_OWNER): verify it against
// memory and become owner again.
371   transition(NO, Ack_Owner, O) {
372     n_checkIncomingMsg;
373     f_incrementTokens;
374     k_popIncomingResponseQueue;
375   }
376
377   transition(NO, {Data_Shared, Ack}) {
378     f_incrementTokens;
379     k_popIncomingResponseQueue;
380   }
381
382   // Trans. from L
// While locked, transient requests are simply dropped; the starver's
// persistent request takes priority.
383   transition(L, {GETX, GETS}) {
384     j_popIncomingRequestQueue;
385   }
386
// Already locked; the persistent table was updated by the in_port.
387   transition(L, Lockdown) {
388     l_popIncomingPersistentQueue;
389   }
390
// Responses arriving while locked are bounced to the starver instead
// of being absorbed by the directory.
391   transition(L, {Data_Owner, Data_Shared, Ack}) {
392     r_bounceResponse;
393     k_popIncomingResponseQueue;
394   }
395
396   transition(L, Ack_Owner) {
397     s_bounceDatalessOwnerToken;
398     k_popIncomingResponseQueue;
399   }
400
// Last persistent request deactivated: unlock and resume as non-owner.
401   transition(L, Unlockdown, NO) {
402     l_popIncomingPersistentQueue;
403   }
404
405 }