ruby: Make ruby #includes use full paths to the files they're including.
[gem5.git] / src / mem / protocol / MOESI_CMP_token-dir.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 */
33
34
// MOESI_CMP_token directory controller: tracks, per memory block, a token
// count plus soft-state probable owner/sharer sets, and services requests
// and responses that reach memory.
machine(Directory, "Token protocol") {

  // Outgoing ("To" network) queues.
  MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false";
  MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";

  // Incoming ("From" network) queues; the persistent-request channel is
  // the only one that must preserve ordering.
  MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true";
  MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
  MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";
  // STATES
  enumeration(State, desc="Directory states", default="Directory_State_O") {
    // Base states
    O, desc="Owner";        // directory holds the owner token (setState asserts Tokens >= 1)
    NO, desc="Not Owner";
    L, desc="Locked";       // locked by a persistent request; setState asserts Tokens == 0
  }

  // Events
  enumeration(Event, desc="Directory events") {
    GETX, desc="A GETX arrives";
    GETS, desc="A GETS arrives";
    Lockdown, desc="A lockdown request arrives";
    Unlockdown, desc="An un-lockdown request arrives";
    Data_Owner, desc="Data arrive";
    Ack_Owner, desc="Owner token arrived without data because it was clean";
    Tokens, desc="Tokens arrive";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    // A fresh entry starts out holding every token for the line.
    int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

    // The following state is provided to allow for bandwidth
    // efficient directory-like operation. However all of this state
    // is 'soft state' that does not need to be correct (as long as
    // you're eventually willing to resort to broadcast.)

    Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
    Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
  }

  // Interface to the backing store of directory entries (implemented
  // outside this file).
  external_type(DirectoryMemory) {
    Entry lookup(Address);
    bool isPresent(Address);
  }


  // ** OBJECTS **

  DirectoryMemory directory, constructor_hack="i";

  // Table of outstanding persistent (starvation-avoidance) requests.
  PersistentTable persistentTable, constructor_hack="i";

  // Return the current directory state recorded for addr.
  State getState(Address addr) {
    return directory[addr].DirectoryState;
  }

  // Record the new directory state for addr, then sanity-check the token
  // and ownership invariants that must hold in that state.
  void setState(Address addr, State state) {
    directory[addr].DirectoryState := state;

    // A locked line has forwarded all of its tokens to the starver.
    if (state == State:L) {
      assert(directory[addr].Tokens == 0);
    }

    // We have one or zero owners
    assert((directory[addr].Owner.count() == 0) || (directory[addr].Owner.count() == 1));

    // Make sure the token count is in range
    assert(directory[addr].Tokens >= 0);
    assert(directory[addr].Tokens <= max_tokens());

    if (state == State:O) {
      assert(directory[addr].Tokens >= 1); // Must have at least one token
      // assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
    }
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(requestNetwork_out, RequestMsg, requestFromDir);

  // ** IN_PORTS **

  // Persistent (starvation-avoidance) lockdown/unlockdown notifications.
  in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (distributedPersistentEnabled()) {
          // Apply the lockdown or unlockdown message to the table
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
          } else {
            error("Invalid message");
          }

          // React to the message based on the current state of the table
          // (a DEACTIVATE may still leave the line locked by another node).
          if (persistentTable.isLocked(in_msg.Address)) {
            trigger(Event:Lockdown, in_msg.Address); // locked
          } else {
            trigger(Event:Unlockdown, in_msg.Address); // unlocked
          }
        }
        else {
          // Distributed table disabled: classify directly from message type.
          if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
            trigger(Event:Lockdown, in_msg.Address); // locked
          } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
            trigger(Event:Lockdown, in_msg.Address); // locked
          } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
            trigger(Event:Unlockdown, in_msg.Address); // unlocked
          } else {
            error("Invalid message");
          }
        }
      }
    }
  }

  // Transient coherence requests (GETS/GETX) that reached memory.
  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:GETX, in_msg.Address);
        } else {
          error("Invalid message");
        }
      }
    }
  }

  // Responses arriving at memory: data with the owner token, plain token
  // acks / shared data, or a clean (dataless) owner-token handoff.
  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
          trigger(Event:Data_Owner, in_msg.Address);
        } else if ((in_msg.Type == CoherenceResponseType:ACK) ||
                   (in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
          trigger(Event:Tokens, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
          trigger(Event:Ack_Owner, in_msg.Address);
        } else {
          DEBUG_EXPR(in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

195 // Actions
196
197 action(a_sendTokens, "a", desc="Send tokens to requestor") {
198 // Only send a message if we have tokens to send
199 if (directory[address].Tokens > 0) {
200 peek(requestNetwork_in, RequestMsg) {
201 // enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
202 enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {// FIXME?
203 out_msg.Address := address;
204 out_msg.Type := CoherenceResponseType:ACK;
205 out_msg.Sender := machineID;
206 out_msg.SenderMachine := MachineType:Directory;
207 out_msg.Destination.add(in_msg.Requestor);
208 out_msg.Tokens := directory[in_msg.Address].Tokens;
209 out_msg.MessageSize := MessageSizeType:Response_Control;
210 }
211 }
212 directory[address].Tokens := 0;
213 }
214 }
215
  // Hand every held token to the starving processor chosen by the
  // persistent table (findSmallest), without data.
  action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
    // Only send a message if we have tokens to send
    if (directory[address].Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {// FIXME?
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := directory[address].Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
      directory[address].Tokens := 0;
    }
  }

233 action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
234 peek(requestNetwork_in, RequestMsg) {
235 enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
236 out_msg.Address := address;
237 out_msg.Type := CoherenceResponseType:DATA_OWNER;
238 out_msg.Sender := machineID;
239 out_msg.SenderMachine := MachineType:Directory;
240 out_msg.Destination.add(in_msg.Requestor);
241 assert(directory[address].Tokens > 0);
242 out_msg.Tokens := directory[in_msg.Address].Tokens;
243 out_msg.DataBlk := directory[in_msg.Address].DataBlk;
244 out_msg.Dirty := false;
245 out_msg.MessageSize := MessageSizeType:Response_Data;
246 }
247 }
248 directory[address].Tokens := 0;
249 }
250
  // Send the memory copy plus every held token to the starving processor
  // chosen by the persistent table.
  action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
    enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:Directory;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(directory[address].Tokens > 0);
      out_msg.Tokens := directory[address].Tokens;
      out_msg.DataBlk := directory[address].DataBlk;
      // Memory's copy is by definition not dirty.
      out_msg.Dirty := false;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    directory[address].Tokens := 0;
  }

  // Absorb the tokens carried by the incoming response into our count.
  action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Tokens >= 1); // token-carrying responses always hold at least one
      directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
    persistentNetwork_in.dequeue();
  }

  // Copy the data block from the incoming response into the memory image.
  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      directory[in_msg.Address].DataBlk := in_msg.DataBlk;
      DEBUG_EXPR(in_msg.Address);
      DEBUG_EXPR(in_msg.DataBlk);
    }
  }

  // Sanity-check a clean owner-token handoff: it must be a dataless
  // ACK_OWNER whose piggy-backed data matches what memory already holds.
  action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
    }
  }

  // Forward an incoming response unchanged to the starving processor;
  // while locked (state L) the directory keeps no tokens of its own.
  action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
      assert(in_msg.Dirty == false);
      assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

      // NOTE: The following check would not be valid in a real
      // implementation. We include the data in the "dataless"
      // message so we can assert the clean data matches the datablock
      // in memory
      assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);

      // Bounce the message, but "re-associate" the data and the owner
      // token. In essence we're converting an ACK_OWNER message to a
      // DATA_OWNER message, keeping the number of tokens the same.
      enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:Directory;
        // NOTE(review): this is the only action that sets DestMachine —
        // confirm the field is still required by the message definition.
        out_msg.DestMachine := MachineType:L1Cache;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := directory[in_msg.Address].DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }


  // TRANSITIONS

  // Trans. from O
  transition(O, GETX, NO) {
    d_sendDataWithAllTokens;
    j_popIncomingRequestQueue;
  }

  transition(O, GETS, NO) {
    // The directory hands over data and all of its tokens even on a GETS.
    d_sendDataWithAllTokens;
    j_popIncomingRequestQueue;
  }

  transition(O, Lockdown, L) {
    dd_sendDataWithAllTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(O, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from NO
  transition(NO, GETX) {
    a_sendTokens;
    j_popIncomingRequestQueue;
  }

  transition(NO, GETS) {
    // Not the owner: nothing to supply, just consume the request.
    j_popIncomingRequestQueue;
  }

  transition(NO, Lockdown, L) {
    aa_sendTokensToStarver;
    l_popIncomingPersistentQueue;
  }

  transition(NO, Data_Owner, O) {
    m_writeDataToMemory;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Ack_Owner, O) {
    // Clean owner handoff: verify the piggy-backed data instead of writing it.
    n_checkIncomingMsg;
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  transition(NO, Tokens) {
    f_incrementTokens;
    k_popIncomingResponseQueue;
  }

  // Trans. from L
  transition(L, {GETX, GETS}) {
    // While locked, transient requests are simply dropped.
    j_popIncomingRequestQueue;
  }

  transition(L, Lockdown) {
    l_popIncomingPersistentQueue;
  }

  // we could change this to write the data to memory and send it cleanly
  transition(L, Data_Owner) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition(L, Tokens) {
    r_bounceResponse;
    k_popIncomingResponseQueue;
  }

  transition(L, Ack_Owner) {
    s_bounceDatalessOwnerToken;
    k_popIncomingResponseQueue;
  }


  transition(L, Unlockdown, NO) {
    l_popIncomingPersistentQueue;
  }

}