3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Directory controller for a token-coherence ("Token") protocol.
// NOTE(review): this extract elides many lines of the original file
// (blank lines, closing braces); all tokens below are preserved verbatim.
machine(Directory, "Token protocol") {

// Outgoing ("To" network) buffers: requests on virtual network 1,
// responses on virtual network 2 — mirroring the incoming assignment below.
MessageBuffer requestFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false";

// Incoming ("From" network) buffers.  Persistent (starvation-avoidance)
// requests arrive on their own ORDERED virtual network 3 — ordering matters
// so lockdown/unlockdown messages are observed in issue order.
MessageBuffer persistentToDir, network="From", virtual_network="3", ordered="true";
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false";
// Directory states.  The individual state declarations are elided from this
// extract; transitions below reference O (owner), NO (not owner), and
// L (locked by a persistent request).  Default state is O.
enumeration(State, desc="Directory states", default="Directory_State_O") {

// Events that drive the directory state machine, derived from the three
// in_ports below (request, response, and persistent networks).
enumeration(Event, desc="Directory events") {
GETX, desc="A GETX arrives";
GETS, desc="A GETS arrives";
Lockdown, desc="A lockdown request arrives";
Unlockdown, desc="An un-lockdown request arrives";
Data_Owner, desc="Data arrive";
Ack_Owner, desc="Owner token arrived without data because it was clean";
Tokens, desc="Tokens arrive";
// Per-line directory entry: the authoritative token count and data copy,
// plus advisory ("soft") owner/sharer sets.
structure(Entry, desc="...") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
// The directory initially holds ALL tokens for every line (max_tokens()).
int Tokens, default="max_tokens()", desc="Number of tokens for the line we're holding";

// The following state is provided to allow for bandwidth
// efficient directory-like operation. However all of this state
// is 'soft state' that does not need to be correct (as long as
// you're eventually willing to resort to broadcast.)
Set Owner, desc="Probable Owner of the line. More accurately, the set of processors who need to see a GetS or GetO. We use a Set for convenience, but only one bit is set at a time.";
Set Sharers, desc="Probable sharers of the line. More accurately, the set of processors who need to see a GetX";
// Interface to the C++-implemented directory memory: lookup returns the
// per-line Entry defined above.
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);

// Controller-local instances (constructor_hack passes the machine index).
DirectoryMemory directory, constructor_hack="i";

// Table of outstanding persistent (starvation-avoidance) requests; used to
// find the highest-priority starver via findSmallest() in the actions below.
PersistentTable persistentTable, constructor_hack="i";
// Return the current directory state for addr from its directory entry.
State getState(Address addr) {
return directory[addr].DirectoryState;
// Update the directory state for addr and sanity-check the token-count
// invariants implied by the new state.
void setState(Address addr, State state) {
directory[addr].DirectoryState := state;

// In the Locked state all tokens have been forwarded to the starving
// processor, so the directory must hold none.
if (state == State:L) {
assert(directory[addr].Tokens == 0);

// We have one or zero owners
assert((directory[addr].Owner.count() == 0) || (directory[addr].Owner.count() == 1));

// Make sure the token count is in range
assert(directory[addr].Tokens >= 0);
assert(directory[addr].Tokens <= max_tokens());

// Owned by the directory implies it holds at least the owner token.
if (state == State:O) {
assert(directory[addr].Tokens >= 1); // Must have at least one token
// assert(directory[addr].Tokens >= (max_tokens() / 2)); // Only mostly true; this might not always hold
// Outgoing ports onto the "To" network buffers declared above.
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
out_port(requestNetwork_out, RequestMsg, requestFromDir);
// Persistent-request (lockdown) port.  Translates lockdown/unlockdown
// messages into Lockdown/Unlockdown events.  NOTE(review): the `} else {`
// separators before the two error() calls are elided in this extract.
in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
if (persistentNetwork_in.isReady()) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));

// Distributed mode: every node (including the directory) maintains a
// replica of the persistent table and reacts to its aggregate state.
if (distributedPersistentEnabled()) {
// Apply the lockdown or unlockdown message to the table
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Write);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
persistentTable.persistentRequestLock(in_msg.Address, in_msg.Requestor, AccessType:Read);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
persistentTable.persistentRequestUnlock(in_msg.Address, in_msg.Requestor);
error("Invalid message");

// React to the message based on the current state of the table
if (persistentTable.isLocked(in_msg.Address)) {
trigger(Event:Lockdown, in_msg.Address); // locked
trigger(Event:Unlockdown, in_msg.Address); // unlocked

// Centralized mode: the message type alone decides lock/unlock.
if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
trigger(Event:Lockdown, in_msg.Address); // locked
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
trigger(Event:Lockdown, in_msg.Address); // locked
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
trigger(Event:Unlockdown, in_msg.Address); // unlocked
error("Invalid message");
// Transient-request port: maps GETS/GETX coherence requests directly onto
// the corresponding directory events.
in_port(requestNetwork_in, RequestMsg, requestToDir) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
error("Invalid message");
// Response port: owner-token responses (with or without data) are
// distinguished from plain token/ack responses.
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
if (responseNetwork_in.isReady()) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.Address);
// Both a bare ACK and shared data carry only non-owner tokens.
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Tokens, in_msg.Address);
// Owner token returned without data (block was clean).
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.Address);
DEBUG_EXPR(in_msg.Type);
error("Invalid message");
// Send all tokens the directory holds (as a data-less ACK) to the
// requestor of the message at the head of the request queue.
action(a_sendTokens, "a", desc="Send tokens to requestor") {
// Only send a message if we have tokens to send
if (directory[address].Tokens > 0) {
peek(requestNetwork_in, RequestMsg) {
// enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {// FIXME?
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
// NOTE(review): `in_msg.Address` here aliases `address` (the event was
// triggered with in_msg.Address in requestNetwork_in); the surrounding
// lines use `address` — harmless but inconsistent.
out_msg.Tokens := directory[in_msg.Address].Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
// All tokens were handed to the requestor.
directory[address].Tokens := 0;
// Send all held tokens (data-less ACK) to the highest-priority starving
// processor recorded in the persistent table.
action(aa_sendTokensToStarver, "\a", desc="Send tokens to starver") {
// Only send a message if we have tokens to send
if (directory[address].Tokens > 0) {
// enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_CACHE_LATENCY") {// FIXME?
enqueue(responseNetwork_out, ResponseMsg, latency="DIRECTORY_LATENCY") {// FIXME?
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
// findSmallest() selects the winning (oldest/lowest-ID) persistent requestor.
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := directory[address].Tokens;
out_msg.MessageSize := MessageSizeType:Response_Control;
// All tokens were handed to the starver.
directory[address].Tokens := 0;
// Send the memory copy of the block plus every token (including the owner
// token, as DATA_OWNER) to the requestor at the head of the request queue.
action(d_sendDataWithAllTokens, "d", desc="Send data and tokens to requestor") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(in_msg.Requestor);
// Sending DATA_OWNER requires holding at least the owner token.
assert(directory[address].Tokens > 0);
// NOTE(review): `in_msg.Address` aliases `address` here (event triggered
// with in_msg.Address); neighbouring lines use `address`.
out_msg.Tokens := directory[in_msg.Address].Tokens;
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
// Memory's copy is by definition clean.
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
// All tokens were handed to the requestor.
directory[address].Tokens := 0;
// Send the memory copy of the block plus every token (DATA_OWNER) to the
// winning starving processor from the persistent table.
action(dd_sendDataWithAllTokensToStarver, "\d", desc="Send data and tokens to starver") {
enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(persistentTable.findSmallest(address));
// Sending DATA_OWNER requires holding at least the owner token.
assert(directory[address].Tokens > 0);
out_msg.Tokens := directory[address].Tokens;
out_msg.DataBlk := directory[address].DataBlk;
// Memory's copy is by definition clean.
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
// All tokens were handed to the starver.
directory[address].Tokens := 0;
// Absorb the tokens carried by the response at the head of the response
// queue into the directory's count for this line.
action(f_incrementTokens, "f", desc="Increment the number of tokens we're tracking") {
peek(responseNetwork_in, ResponseMsg) {
// Token-carrying responses must carry at least one token.
assert(in_msg.Tokens >= 1);
directory[address].Tokens := directory[address].Tokens + in_msg.Tokens;
// Consume the message at the head of the incoming request queue.
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue();
// Consume the message at the head of the incoming response queue.
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
responseNetwork_in.dequeue();
// Consume the message at the head of the incoming persistent-request queue.
action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
persistentNetwork_in.dequeue();
// Commit the data block carried by the incoming response into memory
// (the directory entry's DataBlk).
action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
peek(responseNetwork_in, ResponseMsg) {
directory[in_msg.Address].DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
// Debug-only validation of a clean owner-token return: it must be an
// ACK_OWNER, clean, control-sized, and its (simulator-only) data payload
// must match memory's copy.
action(n_checkIncomingMsg, "n", desc="Check incoming token message") {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
// While locked, forward an incoming response unchanged to the starving
// processor instead of absorbing it (zero added latency).
action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
// Preserve the original message verbatim except sender/destination.
out_msg.Type := in_msg.Type;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := in_msg.Tokens;
out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := in_msg.Dirty;
// While locked, forward a clean (data-less) owner-token return to the
// starver, re-attaching memory's copy of the data so it arrives as a
// full DATA_OWNER message.
action(s_bounceDatalessOwnerToken, "s", desc="Bounce clean owner token to starving processor") {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Type == CoherenceResponseType:ACK_OWNER);
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);

// NOTE: The following check would not be valid in a real
// implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock
assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);

// Bounce the message, but "re-associate" the data and the owner
// token. In essence we're converting an ACK_OWNER message to a
// DATA_OWNER message, keeping the number of tokens the same.
enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_OWNER;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
out_msg.DestMachine := MachineType:L1Cache;
out_msg.Destination.add(persistentTable.findSmallest(address));
out_msg.Tokens := in_msg.Tokens;
// Data is re-attached from memory's (clean, matching) copy.
out_msg.DataBlk := directory[in_msg.Address].DataBlk;
out_msg.Dirty := in_msg.Dirty;
out_msg.MessageSize := MessageSizeType:Response_Data;
// ---- Transition table ----
// NOTE(review): several transitions below show only some of their action
// lines; the elided original lines presumably invoke additional actions
// (e.g. f_incrementTokens for the Tokens events) — confirm against the
// unabridged source before relying on this table.

// O (directory owns): hand data + all tokens to any requestor, dropping
// to NO (not owner).
transition(O, GETX, NO) {
d_sendDataWithAllTokens;
j_popIncomingRequestQueue;

transition(O, GETS, NO) {
d_sendDataWithAllTokens;
j_popIncomingRequestQueue;

// Persistent request while owning: forward data + tokens to the starver
// and enter the Locked state.
transition(O, Lockdown, L) {
dd_sendDataWithAllTokensToStarver;
l_popIncomingPersistentQueue;

transition(O, Tokens) {
k_popIncomingResponseQueue;

// NO (not owner): transient requests are satisfied from whatever tokens
// the directory still holds; the owner cache supplies the data.
transition(NO, GETX) {
j_popIncomingRequestQueue;

transition(NO, GETS) {
j_popIncomingRequestQueue;

transition(NO, Lockdown, L) {
aa_sendTokensToStarver;
l_popIncomingPersistentQueue;

// Owner token (with or without data) returns: directory becomes owner.
transition(NO, Data_Owner, O) {
k_popIncomingResponseQueue;

transition(NO, Ack_Owner, O) {
k_popIncomingResponseQueue;

transition(NO, Tokens) {
k_popIncomingResponseQueue;

// L (locked): ignore transient requests; bounce all responses to the
// starving processor rather than accumulating tokens here.
transition(L, {GETX, GETS}) {
j_popIncomingRequestQueue;

transition(L, Lockdown) {
l_popIncomingPersistentQueue;

// we could change this to write the data to memory and send it cleanly
transition(L, Data_Owner) {
k_popIncomingResponseQueue;

transition(L, Tokens) {
k_popIncomingResponseQueue;

transition(L, Ack_Owner) {
s_bounceDatalessOwnerToken;
k_popIncomingResponseQueue;

// Persistent request released: return to NO (tokens are with the caches).
transition(L, Unlockdown, NO) {
l_popIncomingPersistentQueue;