src/mem/protocol/MSI_MOSI_CMP_directory-L2cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 *
33 */
34
35 machine(L2Cache, "MOSI Directory L2 Cache CMP") {
36
37 // L2 BANK QUEUES
38 // From local bank of L2 cache TO the network
39 MessageBuffer dummyFrom0, network="To", virtual_network="0", ordered="false"; // dummy buffer that shouldn't be used
40 MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="1", ordered="false"; // this L2 bank -> mod-directory
41 MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="2", ordered="true"; // this L2 bank -> a local L1
42 MessageBuffer responseFromL2Cache, network="To", virtual_network="3", ordered="false"; // this L2 bank -> a local L1 || mod-directory
43 MessageBuffer finalAckFromL2Cache, network="To", virtual_network="4", ordered="false"; // this L2 bank -> mod-directory
44
45 // FROM the network to this local bank of L2 cache
46 //MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="true"; // a local L1 -> this L2 bank
47 MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="true"; // a local L1 -> this L2 bank
48 MessageBuffer dummyTo1, network="From", virtual_network="1", ordered="false"; // dummy buffer that shouldn't be used
49 MessageBuffer forwardedRequestToL2Cache, network="From", virtual_network="2", ordered="true"; // mod-directory -> this L2 bank
50 MessageBuffer responseToL2Cache, network="From", virtual_network="3", ordered="false"; // a local L1 || mod-directory -> this L2 bank
51 MessageBuffer dummyTo4, network="From", virtual_network="4", ordered="false"; // dummy buffer that shouldn't be used
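// Note: each direction declares one buffer per virtual network (0 through 4).
// The "dummy" buffers presumably exist only so that every virtual network has
// a declared endpoint at this controller; the dummyTo1_in and dummyTo4_in
// ports below treat any message arriving on them as an error.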
52
53 // STATES
54 enumeration(State, desc="L2 Cache states", default="L2Cache_State_L2_NP") {
55 // Base states
56 L2_NP, desc="Not present in either cache";
57 L2_I, desc="L2 cache entry Idle";
58 L2_S, desc="L2 cache entry Shared, not present in any local L1s";
59 L2_O, desc="L2 cache entry Owned, not present in any local L1s";
60 L2_M, desc="L2 cache entry Modified, not present in any L1s", format="!b";
61 L2_SS, desc="L2 cache entry Shared, also present in one or more L1s";
62 L2_SO, desc="L2 cache entry Owned, also present in one or more L1s or ext L2s";
63 L2_MT, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";
64
65 // Transient States
66
67 // Transient States from I
68 L2_IS, desc="L2 idle, issued GETS, have not seen response yet";
69 L2_ISZ, desc="L2 idle, issued GETS, saw a L1_GETX, have not seen data for GETS yet", format="!b";
70 L2_ISI, desc="L2 idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
71 L2_IMV, desc="L2 idle, issued GETX, valid int L1, have not seen response(s) yet";
72 L2_MV, desc="L2 modified, a valid old L1 copy exists, external world gave write permission";
73 L2_IM, desc="L2 idle, issued GETX, no valid int L1, have not seen response(s) yet";
74 L2_IMO, desc="L2 idle, issued GETX, saw forwarded GETS";
75 L2_IMI, desc="L2 idle, issued GETX, saw forwarded GETX";
76 L2_IMZ, desc="L2 idle, issued GETX, saw another L1_GETX";
77 L2_IMOI, desc="L2 idle, issued GETX, saw GETS, saw forwarded GETX";
78 L2_IMOZ, desc="L2 idle, issued GETX, saw GETS, then a L1_GETX";
79
80 // Invalidation steps for S -> I
81 L2_SIC, desc="L2 shared, L2_INV, valid L1 copies exist, issued invalidates, have not seen responses yet";
82 L2_SIV, desc="L2 shared, L2_Replacement, valid L1 copies exist, issued invalidates, have not seen responses yet";
83
84 // Invalidation steps for M -> I for L2 Replacement
85 L2_MIV, desc="L2 modified, a valid L1 copy exists, issued forced writeback, have not seen the response yet";
86 L2_MIN, desc="L2 modified, no valid L1 copies, issued PUTX, have not seen response yet";
87
88 // Invalidation steps for M -> I for a Forwarded GetX
89 L2_MIC, desc="L2 modified, a valid L1 copy exists, issued forced writeback, have not seen the response yet";
90
91 // In MT state and see another L1_GETX request
92 L2_MIT, desc="L2 modified, a valid L1 copy exists, saw L1_GETX, issued INV, have not seen the response yet";
93
94 // Downgrade steps for M -> SO
95 L2_MO, desc="L2 modified, a valid L1 copy exists, issued downgrade request, have not seen response yet";
96 L2_MOIC, desc="L2 modified, a valid L1 copy exists, issued downgrade request, saw INV, have not seen response yet";
97 L2_MOICR, desc="L2 modified, a valid L1 copy exists, issued invalidate request, saw INV, have not seen response yet";
98 L2_MOZ, desc="L2 modified, a valid L1 copy exists, issued downgrade request, saw L1_GETX, have not seen response yet";
99
100 // Invalidation steps for O/SO -> I for L2 Replacement
101 L2_OIV, desc="L2 owned, valid L1 copies exist, issued invalidates, have not seen responses yet from L1s";
102 L2_OIN, desc="L2 owned, no valid L1 copies, issued PUTX, have not seen response yet from dir";
103
104 // Invalidation steps for SO -> I for a Forwarded GetX
105 L2_OIC, desc="L2 owned, valid L1 copies exist, issued invalidates, have not seen responses yet from L1s";
106
107 // Strange OM states
108 // Note: these are strange states because the L2 is waiting for the line
109 // to be stolen away, or to look like it has been stolen away. The
110 // common case is that we see a forward from the directory that is
111 // really from us, we forwarded the data to our data queue, and
112 // everything works fine.
113 L2_OMV, desc="L2 owned and valid L1 copies, issued GETX and invalidates, have not seen responses yet";
114 L2_OM, desc="L2 owned and no valid L1 copies, issued GETX, have not seen response yet";
115 }
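// Naming convention used above (summarized here for reference): base states
// are L2_NP/I/S/O/M, plus L2_SS, L2_SO and L2_MT when local L1 copies exist;
// transient-state names encode the pending work, e.g. L2_IS means the line
// was Idle and a GETS is outstanding, while L2_IMO means it was Idle, a GETX
// is outstanding, and a forwarded GETS has been queued for later service.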
116
117 // EVENTS
118 enumeration(Event, desc="L2 Cache events") {
119 // L2 events
120
121 // events initiated by the local L1s
122 L1_GET_INSTR, desc="a L1I GET INSTR request for a block mapped to us";
123 L1_GETS, desc="a L1D GETS request for a block mapped to us";
124 L1_GETX, desc="a L1D GETX request for a block mapped to us";
125 L1_UPGRADE, desc="a L1D UPGRADE request for a block mapped to us";
126 L1_UPGRADE_no_others, desc="a L1D UPGRADE request for a block mapped to us, requestor is the only on-chip sharer";
127 L1_PUTX, desc="a L1D PUTX request for a block mapped to us (L1 replacement of a modified block)";
128 L1_PUTX_last, desc="a L1D PUTX request for a block mapped to us (L1 replacement of a modified block), last sharer";
129 L1_PUTX_old, desc="an old L1D PUTX request for a block mapped to us (L1 replacement of a modified block)";
130 L1_PUTS, desc="a L1 replacement of a shared block", format="!r";
131 L1_PUTS_last, desc="a L1 replacement of the last local L1 shared block", format="!r";
132 L1_PUTS_old, desc="an old L1 replacement of a shared block", format="!r";
133
134 // events of local L1 responses
135 Proc_int_ack, "Proc on-chip L1 Cache ack", desc="Ack from on-chip L1 Cache";
136 Proc_last_int_ack, "Proc last on-chip L1 Cache ack", desc="Last on-chip L1 Cache ack", format="!r";
137
138 Data_int_ack, "Data int ack", desc="Received modified data from L1 now proceed in handling miss";
139
140 // events initiated by the external L2s
141 Forwarded_GETS, "Forwarded GETS", desc="Directory forwards Inter-chip GETS to us";
142 Forwarded_GET_INSTR, "Forwarded GETINSTR", desc="Inter-chip Forwarded GETINSTR";
143 Forwarded_GETX, "Forwarded GETX", desc="Directory forwards Inter-chip GETX to us";
144 L2_INV, "L2_INV", desc="L2 Invalidation initiated from other L2", format="!r";
145
146 // events initiated by this L2
147 L2_Replacement, desc="L2 Replacement", format="!r";
148
149 // events of external L2 responses
150 Proc_ext_ack, "Proc off-chip ack", desc="Ack from off-chip";
151 Proc_last_ext_ack, "Proc last off-chip ack", desc="Last off-chip ack", format="!r";
152
153 Data_ext_ack_0, "Data ack 0", desc="Data with ack count = 0";
154 Data_ext_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks yet)";
155 // Data_ext_ack_not_0_last: used when the requestor has seen all acks but the directory has not; therefore
156 // the directory must be told that we now have the data
157 Data_ext_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
158
159 Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
160 Dir_exe_ack, "Only copy", desc="Directory tells us we already have exclusive permission, go directly to MT state";
161 }
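// The events above fall into five groups: requests from local L1s (L1_*),
// responses from local L1s (Proc_int_ack, Proc_last_int_ack, Data_int_ack),
// requests forwarded by the directory (Forwarded_*, L2_INV, Dir_WB_ack,
// Dir_exe_ack), responses from off-chip (Proc_*_ext_ack, Data_ext_*), and the
// locally triggered L2_Replacement.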
162
163 // TYPES
164
165 // CacheEntry
166 structure(Entry, desc="...", interface="AbstractCacheEntry") {
167 State CacheState, desc="cache state";
168 NetDest Sharers, desc="tracks the on-chip L1 sharers";
169 DataBlock DataBlk, desc="data for the block";
170 }
171
172 // TBE fields
173 structure(TBE, desc="...") {
174 Address Address, desc="Physical address for this TBE";
175 State TBEState, desc="Transient state";
176 DataBlock DataBlk, desc="Buffer for the data block";
177 int NumPendingExtAcks, desc="Number of ext acks that this L2 bank is waiting for";
178 int NumPendingIntAcks, desc="Number of int acks that this L2 bank is waiting for";
179 NetDest Forward_GetS_IDs, desc="Set of the external processors to forward the block";
180 NetDest L1_GetS_IDs, desc="Set of the internal processors that want the block in shared state";
181 MachineID Forward_GetX_ID, desc="ID of the L2 cache to forward the block";
182 MachineID L1_GetX_ID, desc="ID of the L1 cache to forward the block to once we get a response";
183 MachineID InvalidatorID, desc="ID of the L2 cache (needed for L2_SS -> L2_I)";
184 int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
185 bool isPrefetch, desc="Set if this was caused by a prefetch";
186 bool isThreeHop, desc="is this request a three hop";
187 bool validForwardedGetXId, desc="Indicate whether a forwarded GetX ID is valid";
188 bool validInvalidator, desc="Indicate whether an invalidator is valid";
189 bool isInternalRequestOnly, desc="Is internal request only, i.e. only L1s";
190 }
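// The two ack counters drive event selection in the response in_port below:
// NumPendingIntAcks tracks outstanding invalidation acks from local L1s
// (Proc_int_ack vs. Proc_last_int_ack), while NumPendingExtAcks tracks
// outstanding off-chip acks (Proc_ext_ack vs. Proc_last_ext_ack and the
// Data_ext_ack_* variants).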
191
192 external_type(CacheMemory) {
193 bool cacheAvail(Address);
194 Address cacheProbe(Address);
195 void allocate(Address);
196 void deallocate(Address);
197 Entry lookup(Address);
198 void changePermission(Address, AccessPermission);
199 bool isTagPresent(Address);
200 void setMRU(Address);
201 }
202
203 external_type(TBETable) {
204 TBE lookup(Address);
205 void allocate(Address);
206 void deallocate(Address);
207 bool isPresent(Address);
208 }
209
210 TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
211
212 CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)';
213
214 // inclusive cache, returns L2 entries only
215 Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
216 return L2cacheMemory[addr];
217 }
218
219 void changeL2Permission(Address addr, AccessPermission permission) {
220 if (L2cacheMemory.isTagPresent(addr)) {
221 return L2cacheMemory.changePermission(addr, permission);
222 }
223 }
224
225 std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
226 return CoherenceRequestType_to_string(type);
227 }
228
229 bool isL2CacheTagPresent(Address addr) {
230 return (L2cacheMemory.isTagPresent(addr));
231 }
232
233 bool isOneSharerLeft(Address addr, MachineID requestor) {
234 assert(L2cacheMemory[addr].Sharers.isElement(requestor));
235 return (L2cacheMemory[addr].Sharers.count() == 1);
236 }
237
238 bool isSharer(Address addr, MachineID requestor) {
239 if (L2cacheMemory.isTagPresent(addr)) {
240 return L2cacheMemory[addr].Sharers.isElement(requestor);
241 } else {
242 return false;
243 }
244 }
245
246 void addSharer(Address addr, MachineID requestor) {
247 DEBUG_EXPR(machineID);
248 DEBUG_EXPR(requestor);
249 DEBUG_EXPR(addr);
250 assert(map_L1CacheMachId_to_L2Cache(addr, requestor) == machineID);
251 L2cacheMemory[addr].Sharers.add(requestor);
252 }
253
254 State getState(Address addr) {
255 if(L2_TBEs.isPresent(addr)) {
256 return L2_TBEs[addr].TBEState;
257 } else if (isL2CacheTagPresent(addr)) {
258 return getL2CacheEntry(addr).CacheState;
259 }
260 return State:L2_NP;
261 }
262
263 std::string getStateStr(Address addr) {
264 return L2Cache_State_to_string(getState(addr));
265 }
266
267 // when is this called
268 void setState(Address addr, State state) {
269
270 // MUST CHANGE
271 if (L2_TBEs.isPresent(addr)) {
272 L2_TBEs[addr].TBEState := state;
273 }
274
275 if (isL2CacheTagPresent(addr)) {
276 getL2CacheEntry(addr).CacheState := state;
277
278 // Set permission
279 if (state == State:L2_I ||
280 state == State:L2_SIC || state == State:L2_SIV ||
281 state == State:L2_MIV || state == State:L2_MIN || state == State:L2_MIC || state == State:L2_MIT ||
282 state == State:L2_OIV || state == State:L2_OIN || state == State:L2_OIC) {
283 changeL2Permission(addr, AccessPermission:Invalid);
284 } else if (state == State:L2_S || state == State:L2_O || state == State:L2_SS || state == State:L2_SO) {
285 changeL2Permission(addr, AccessPermission:Read_Only);
286 } else if (state == State:L2_OM || state == State:L2_OMV) {
287 changeL2Permission(addr, AccessPermission:ReadUpgradingToWrite);
288 } else if (state == State:L2_M) {
289 changeL2Permission(addr, AccessPermission:Read_Write);
290 } else if (state == State:L2_MT) {
291 changeL2Permission(addr, AccessPermission:Stale);
292 } else {
293 changeL2Permission(addr, AccessPermission:Busy);
294 }
295 }
296 }
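// Permission summary, as derived from the chain above: invalid and
// in-eviction states map to Invalid; shared and owned states to Read_Only;
// L2_OM and L2_OMV to ReadUpgradingToWrite; L2_M to Read_Write; L2_MT to
// Stale (a local L1 holds the up-to-date copy); every other transient state
// maps to Busy.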
297
298 Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr, MachineID requestor) {
299 if(type == CoherenceRequestType:GETS) {
300 return Event:L1_GETS;
301 } else if(type == CoherenceRequestType:GET_INSTR) {
302 return Event:L1_GET_INSTR;
303 } else if (type == CoherenceRequestType:GETX) {
304 return Event:L1_GETX;
305 } else if (type == CoherenceRequestType:UPGRADE) {
306 if (isSharer(addr, requestor)) {
307 if (isOneSharerLeft(addr, requestor)) {
308 return Event:L1_UPGRADE_no_others;
309 } else {
310 return Event:L1_UPGRADE;
311 }
312 } else { // possible that we removed the line from the L2 before we could process the UPGRADE request
313 return Event:L1_GETX;
314 }
315 } else if (type == CoherenceRequestType:PUTX) {
316 if (isSharer(addr, requestor)) {
317 if (isOneSharerLeft(addr, requestor)) {
318 return Event:L1_PUTX_last;
319 } else {
320 return Event:L1_PUTX;
321 }
322 } else {
323 return Event:L1_PUTX_old;
324 }
325 } else if (type == CoherenceRequestType:PUTS) {
326 if (isSharer(addr, requestor)) {
327 if (isOneSharerLeft(addr, requestor)) {
328 return Event:L1_PUTS_last;
329 } else {
330 return Event:L1_PUTS;
331 }
332 } else { // possible that we removed the line from the L2 before we could process the L1_PUTS request
333 return Event:L1_PUTS_old;
334 }
335 } else {
336 DEBUG_EXPR(addr);
337 DEBUG_EXPR(type);
338 error("Invalid L1 forwarded request type");
339 }
340 }
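// Example of the race handled above (illustrative only): an L1 issues an
// UPGRADE, but this bank has meanwhile replaced or invalidated the block, so
// the requestor is no longer listed in Sharers. When the UPGRADE arrives,
// isSharer() is false and it is treated as a fresh L1_GETX rather than an
// upgrade of a copy the L2 no longer tracks; the *_old events play the same
// role for PUTX/PUTS races.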
341
342 // ** OUT_PORTS **
343 // All ports output to the same CMP network, NI determines where to route msg
344
345 out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
346 out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
347 out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);
348 out_port(finalAckIntraChipL2Network_out, ResponseMsg, finalAckFromL2Cache);
349
350 // ** IN_PORTS **
351
352 in_port(dummyTo1_in, RequestMsg, dummyTo1) {
353 if (dummyTo1_in.isReady()) {
354 peek(dummyTo1_in, RequestMsg) {
355 DEBUG_EXPR(in_msg.Address);
356 DEBUG_EXPR(id);
357 DEBUG_EXPR(in_msg.Type);
358 DEBUG_EXPR(getState(in_msg.Address));
359 DEBUG_EXPR(in_msg.RequestorMachId);
360 }
361 error("dummyTo1 port should not be used");
362 }
363 }
364
365 in_port(dummyTo4_in, ResponseMsg, dummyTo4) {
366 if (dummyTo4_in.isReady()) {
367 peek(dummyTo4_in, ResponseMsg) {
368 DEBUG_EXPR(in_msg.Address);
369 DEBUG_EXPR(id);
370 DEBUG_EXPR(in_msg.Type);
371 DEBUG_EXPR(getState(in_msg.Address));
372 DEBUG_EXPR(in_msg.SenderMachId);
373 }
374 error("dummyTo4 port should not be used");
375 }
376 }
377
378 // Response IntraChip L2 Network - response msg to this particular L2 bank
379 in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache) {
380 if (responseIntraChipL2Network_in.isReady()) {
381 peek(responseIntraChipL2Network_in, ResponseMsg) {
382 DEBUG_EXPR(in_msg.Address);
383 DEBUG_EXPR(id);
384 DEBUG_EXPR(getState(in_msg.Address));
385 DEBUG_EXPR(in_msg.SenderMachId);
386 DEBUG_EXPR(in_msg.Type);
387 DEBUG_EXPR(in_msg.NumPendingExtAcks);
388 // test whether it's from a local L1 or an off-chip source
389 assert(in_msg.Destination.isElement(machineID));
390 if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L1Cache) {
391 if(in_msg.Type == CoherenceResponseType:DATA) {
392 if(L2_TBEs[in_msg.Address].NumPendingIntAcks == 1) {
393 trigger(Event:Data_int_ack, in_msg.Address); // L1 now has data and all on-chip acks
394 } else {
395 DEBUG_EXPR(in_msg.Address);
396 DEBUG_EXPR(L2_TBEs[in_msg.Address].NumPendingIntAcks);
397 error("Invalid L1 sent data when L2 wasn't expecting it");
398 }
399 } else if(in_msg.Type == CoherenceResponseType:INV_ACK) {
400 if(L2_TBEs.isPresent(in_msg.Address)) { // FIXME - possible to get a L1 ack after the transaction is completed
401 if(L2_TBEs[in_msg.Address].NumPendingIntAcks == 1) {
402 trigger(Event:Proc_last_int_ack, in_msg.Address); // L1 now has all on-chip acks
403 } else {
404 trigger(Event:Proc_int_ack, in_msg.Address); // process on-chip ack
405 }
406 }
407 }
408 } else { // external message
409 if(in_msg.Type == CoherenceResponseType:DATA) {
410 if(in_msg.NumPendingExtAcks == 0) {
411 trigger(Event:Data_ext_ack_0, in_msg.Address); // L2 now has data and all off-chip acks
412 } else {
413 if(in_msg.NumPendingExtAcks + L2_TBEs[in_msg.Address].NumPendingExtAcks != 0) {
414 trigger(Event:Data_ext_ack_not_0, in_msg.Address);
415 } else {
416 trigger(Event:Data_ext_ack_not_0_last, in_msg.Address);
417 }
418 }
419 } else if(in_msg.Type == CoherenceResponseType:ACK) {
420 if(L2_TBEs[in_msg.Address].NumPendingExtAcks != 1){
421 trigger(Event:Proc_ext_ack, in_msg.Address);
422 } else {
423 trigger(Event:Proc_last_ext_ack, in_msg.Address);
424 }
425 }
426 }
427 }
428 } // if not ready, do nothing
429 }
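// Worked example for the classification above (illustrative only): if the
// directory answers a GETX with DATA carrying NumPendingExtAcks = 2, the port
// triggers Data_ext_ack_not_0 and the TBE accumulates the two pending acks;
// each external ACK then triggers Proc_ext_ack until the TBE count reaches
// one, when Proc_last_ext_ack completes the miss. If both ACKs beat the data,
// the TBE count sits at -2, so the late DATA makes the sum zero and triggers
// Data_ext_ack_not_0_last, prompting a final ack to the directory when the
// data came from another L2.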
430
431 // Forwarded Request from Directory
432 in_port(forwardedRequestIntraChipL2Network_in, RequestMsg, forwardedRequestToL2Cache) {
433 if(forwardedRequestIntraChipL2Network_in.isReady()) {
434 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
435 DEBUG_EXPR(in_msg.Address);
436 DEBUG_EXPR(id);
437 DEBUG_EXPR(getState(in_msg.Address));
438 DEBUG_EXPR(in_msg.RequestorMachId);
439 DEBUG_EXPR(in_msg.Type);
440 assert(in_msg.Destination.isElement(machineID));
441 if(in_msg.Type == CoherenceRequestType:GETS) {
442 trigger(Event:Forwarded_GETS, in_msg.Address); // L2
443 } else if(in_msg.Type == CoherenceRequestType:GET_INSTR) {
444 trigger(Event:Forwarded_GET_INSTR, in_msg.Address); // L2
445 } else if (in_msg.Type == CoherenceRequestType:GETX) {
446 trigger(Event:Forwarded_GETX, in_msg.Address); // L2
447 } else if (in_msg.Type == CoherenceRequestType:INV) {
448 trigger(Event:L2_INV, in_msg.Address); // L2
449 } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
450 trigger(Event:Dir_WB_ack, in_msg.Address); // L2
451 } else if (in_msg.Type == CoherenceRequestType:EXE_ACK) {
452 trigger(Event:Dir_exe_ack, in_msg.Address); // L2
453 } else {
454 error("Invalid L2 forwarded request type");
455 }
456 }
457 }
458 }
459
460 // L1 Request
461 in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache) {
462 if(L1RequestIntraChipL2Network_in.isReady()) {
463 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
464 DEBUG_EXPR(in_msg.Address);
465 DEBUG_EXPR(id);
466 DEBUG_EXPR(version);
467 DEBUG_EXPR(getState(in_msg.Address));
468 DEBUG_EXPR(in_msg.RequestorMachId);
469 DEBUG_EXPR(in_msg.Type);
470 DEBUG_EXPR(in_msg.Destination);
471 assert(machineIDToMachineType(in_msg.RequestorMachId) == MachineType:L1Cache);
472 assert(in_msg.Destination.isElement(machineID));
473 if (L2cacheMemory.isTagPresent(in_msg.Address)) {
474 // The L2 contains the block, so proceed with handling the request
475 trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.RequestorMachId), in_msg.Address);
476 } else {
477 if (L2cacheMemory.cacheAvail(in_msg.Address)) {
478 // L2 doesn't have the line, but we have space for it in the L2
479 trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address, in_msg.RequestorMachId), in_msg.Address);
480 } else {
481 // No room in the L2, so we need to make room before handling the request
482 trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));
483 }
484 }
485 }
486 }
487 }
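// If the tag is present, or a way is free, the L1 request is handled
// directly; otherwise the port triggers L2_Replacement on the victim chosen
// by cacheProbe(), and the original request stays queued until the victim has
// been evicted.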
488
489 // ACTIONS
490
491 action(a_issueGETS, "a", desc="Issue GETS") {
492 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
493 enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
494 out_msg.Address := address;
495 out_msg.Type := CoherenceRequestType:GETS;
496 out_msg.RequestorMachId := machineID;
497 out_msg.Destination.add(map_Address_to_Directory(address));
498 out_msg.MessageSize := MessageSizeType:Control;
499 out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
500 out_msg.L2CacheStateStr := getStateStr(address);
501 }
502 }
503 }
504
505 action(b_issueGETX, "b", desc="Issue GETX") {
506 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
507 enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
508 out_msg.Address := address;
509 out_msg.Type := CoherenceRequestType:GETX;
510 out_msg.RequestorMachId := machineID;
511 out_msg.Destination.add(map_Address_to_Directory(address));
512 out_msg.MessageSize := MessageSizeType:Control;
513 out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
514 out_msg.L2CacheStateStr := getStateStr(address);
515 }
516 }
517 }
518
519 // finalAck issued from the response queue
520 action(c_finalAckToDirIfNeeded, "c", desc="Send FinalAck to dir if this is response to 3-hop xfer") {
521 peek(responseIntraChipL2Network_in, ResponseMsg) {
522 DEBUG_EXPR(in_msg);
523 if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache) {
524 enqueue(finalAckIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY"){
525 out_msg.Address := address;
526 out_msg.Type := CoherenceResponseType:FINALACK;
527 out_msg.SenderMachId := machineID;
528 out_msg.Destination.add(map_Address_to_Directory(address));
529 out_msg.MessageSize := MessageSizeType:Control;
530 DEBUG_EXPR(out_msg);
531 }
532 }
533 }
534 }
535
536 // finalAck issued from TBE
537 action(n_sendFinalAckIfThreeHop, "n", desc="Send FinalAck to dir if this was a three-hop transfer"){
538 peek(responseIntraChipL2Network_in, ResponseMsg){
539 DEBUG_EXPR(in_msg);
540 if(L2_TBEs[address].isThreeHop == true){
541 enqueue(finalAckIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY"){
542 out_msg.Address := address;
543 out_msg.Type := CoherenceResponseType:FINALACK;
544 out_msg.SenderMachId := machineID;
545 out_msg.Destination.add(map_Address_to_Directory(address));
546 out_msg.MessageSize := MessageSizeType:Control;
547 DEBUG_EXPR(out_msg);
548 }
549 }
550 }
551 }
552
553 action(mm_rememberIfFinalAckNeeded, "\m", desc="Remember that a final ack to the dir will be needed"){
554 peek(responseIntraChipL2Network_in, ResponseMsg){
555 if(machineIDToMachineType(in_msg.SenderMachId) == MachineType:L2Cache){
556 L2_TBEs[address].isThreeHop := true;
557 }
558 }
559 }
560
561 action(d_issuePUTX, "d", desc="Issue PUTX") {
562 enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
563 out_msg.Address := address;
564 out_msg.Type := CoherenceRequestType:PUTX;
565 out_msg.RequestorMachId := machineID;
566 out_msg.Destination.add(map_Address_to_Directory(address));
567 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
568 DEBUG_EXPR(out_msg.Address);
569 DEBUG_EXPR(out_msg.Destination);
570 DEBUG_EXPR(out_msg.DataBlk);
571 out_msg.MessageSize := MessageSizeType:Data;
572 out_msg.L1CacheStateStr := "NA";
573 out_msg.L2CacheStateStr := getStateStr(address);
574 }
575 }
576
577 action(f_issueGETINSTR, "f", desc="Issue GETINSTR") {
578 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
579 enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
580 out_msg.Address := address;
581 out_msg.Type := CoherenceRequestType:GET_INSTR;
582 out_msg.RequestorMachId := machineID;
583 out_msg.Destination.add(map_Address_to_Directory(address));
584 out_msg.MessageSize := MessageSizeType:Control;
585 out_msg.L1CacheStateStr := in_msg.L1CacheStateStr;
586 out_msg.L2CacheStateStr := getStateStr(address);
587 }
588 }
589 }
590
591 // DELAYED RESPONSES - Sourced from a TBE entry
592 // TBE -> L1
593 action(h_issueLoadHit, "h", desc="If not prefetch, notify sequencer the load completed.") {
594 DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
595 if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
596 // Non-prefetch
597 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
598 out_msg.Address := address;
599 out_msg.Type := CoherenceResponseType:DATA;
600 out_msg.SenderMachId := machineID;
601 out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // could be multiple internal nodes
602 DEBUG_EXPR(out_msg.Destination);
603 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
604 out_msg.MessageSize := MessageSizeType:Data;
605 }
606 } else {
607 // Prefetch - don't issue hit msg
608 }
609 }
610
611 action(oo_issueLoadHitInv, "\o", desc="If not prefetch, notify sequencer the load completed.") {
612 DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
613 if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
614 // Non-prefetch
615 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
616 out_msg.Address := address;
617 out_msg.Type := CoherenceResponseType:DATA_I;
618 out_msg.SenderMachId := machineID;
619 out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // could be multiple internal nodes
620 DEBUG_EXPR(out_msg.Destination);
621 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
622 out_msg.MessageSize := MessageSizeType:Data;
623 }
624 } else {
625 // Prefetch - don't issue hit msg
626 }
627
628 }
629
630 action(hh_issueStoreHit, "\h", desc="If not prefetch, issue store hit message to local L1 requestor") {
631 DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
632 if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
633 // Non-prefetch
634 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
635 out_msg.Address := address;
636 out_msg.Type := CoherenceResponseType:DATA;
637 out_msg.SenderMachId := machineID;
638 out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
639 DEBUG_EXPR(out_msg.Destination);
640 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
641 out_msg.MessageSize := MessageSizeType:Data;
642 }
643 } else {
644 // Prefetch - don't issue hit msg
645 }
646 }
647
648 action(pp_issueStoreHitInv, "\p", desc="If not prefetch, issue store hit message to local L1 requestor") {
649 DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
650 if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
651 // Non-prefetch
652 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
653 out_msg.Address := address;
654 out_msg.Type := CoherenceResponseType:DATA_I;
655 out_msg.SenderMachId := machineID;
656 out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
657 DEBUG_EXPR(out_msg.Destination);
658 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
659 out_msg.MessageSize := MessageSizeType:Data;
660 }
661 } else {
662 // Prefetch - don't issue hit msg
663 }
664 }
665
666 action(cc_issueStoreHitDG, "\c", desc="If not prefetch, issue store hit message to local L1 requestor") {
667 DEBUG_EXPR(getL2CacheEntry(address).DataBlk);
668 if((L2_TBEs.isPresent(address) == false) || (L2_TBEs[address].isPrefetch == false)) {
669 // Non-prefetch
670 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
671 out_msg.Address := address;
672 out_msg.Type := CoherenceResponseType:DATA_S;
673 out_msg.SenderMachId := machineID;
674 out_msg.Destination.add(L2_TBEs[address].L1_GetX_ID); // a single node
675 DEBUG_EXPR(out_msg.Destination);
676 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
677 out_msg.MessageSize := MessageSizeType:Data;
678 }
679 } else {
680 // Prefetch - don't issue hit msg
681 }
682 }
683
684 action(w_sendPutAckToL1Cache, "w", desc="send acknowledgement of an L1 replacement") {
685 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
686 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
687 out_msg.Address := address;
688 out_msg.Type := CoherenceResponseType:ACK;
689 out_msg.SenderMachId := machineID;
690 out_msg.Destination.add(in_msg.RequestorMachId); // a single node
691 DEBUG_EXPR(out_msg.Destination);
692 out_msg.MessageSize := MessageSizeType:Control;
693 }
694 }
695 }
696
697 // TBE -> L1s and L2s
698 action(ee_dataFromL2CacheToGetSIDs, "\e", desc="Send data from cache to all GetS IDs") {
699 // FIXME - In some cases this should be from the TBE, not the cache.
700 // may send to other mod-L2s
701 if (L2_TBEs[address].Forward_GetS_IDs.count() > 0) {
702 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
703 out_msg.Address := address;
704 out_msg.Type := CoherenceResponseType:DATA;
705 out_msg.SenderMachId := machineID;
706 out_msg.Destination := L2_TBEs[address].Forward_GetS_IDs; // external nodes
707 DEBUG_EXPR(out_msg.Destination);
708 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
709 out_msg.NumPendingExtAcks := 0;
710 DEBUG_EXPR(out_msg.Address);
711 DEBUG_EXPR(out_msg.Destination);
712 DEBUG_EXPR(out_msg.DataBlk);
713 out_msg.MessageSize := MessageSizeType:Data;
714 }
715 }
716 // may send to local L1s
717 if (L2_TBEs[address].L1_GetS_IDs.count() > 0) {
718 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
719 out_msg.Address := address;
720 out_msg.Type := CoherenceResponseType:DATA;
721 out_msg.SenderMachId := machineID;
722 out_msg.Destination := L2_TBEs[address].L1_GetS_IDs; // internal nodes
723 DEBUG_EXPR(out_msg.Destination);
724 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
725 out_msg.MessageSize := MessageSizeType:Data;
726 }
727 }
728 }
729
730 // TBE -> L2s only
731 action(bb_dataFromL2CacheToGetSForwardIDs, "\b", desc="Send data from cache to GetS ForwardIDs") {
732 // FIXME - In some cases this should be from the TBE, not the cache.
733 if ((L2_TBEs[address].Forward_GetS_IDs.count() > 0) || (L2_TBEs[address].L1_GetS_IDs.count() > 0)) {
734 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
735 out_msg.Address := address;
736 out_msg.Type := CoherenceResponseType:DATA;
737 out_msg.SenderMachId := machineID;
738 out_msg.Destination := L2_TBEs[address].Forward_GetS_IDs; // external nodes
739 DEBUG_EXPR(out_msg.Destination);
740 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
741 out_msg.NumPendingExtAcks := 0;
742 out_msg.MessageSize := MessageSizeType:Data;
743 }
744 }
745 }
746
747 // TBE -> L2 only
748 action(gg_dataFromL2CacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
749 // FIXME - In some cases this should be from the TBE, not the cache.
750 if (L2_TBEs[address].validForwardedGetXId) {
751 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
752 out_msg.Address := address;
753 out_msg.Type := CoherenceResponseType:DATA;
754 out_msg.SenderMachId := machineID;
755 out_msg.Destination.add(L2_TBEs[address].Forward_GetX_ID);
756 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
757 out_msg.NumPendingExtAcks := L2_TBEs[address].ForwardGetX_AckCount;
758 DEBUG_EXPR(out_msg.Address);
759 DEBUG_EXPR(out_msg.Destination);
760 DEBUG_EXPR(out_msg.DataBlk);
761 DEBUG_EXPR(out_msg.NumPendingExtAcks);
762 out_msg.MessageSize := MessageSizeType:Data;
763 }
764 }
765 }
766
767 // IMMEDIATE RESPONSES directly from the ForwardRequest queue
768 // ForwardRequest -> L2
769 action(e_dataFromL2CacheToL2Requestor, "e", desc="Send data from cache to requestor") {
770 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
771 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
772 out_msg.Address := address;
773 out_msg.Type := CoherenceResponseType:DATA;
774 out_msg.SenderMachId := machineID;
775 out_msg.NumPendingExtAcks := in_msg.NumPendingExtAcks; // Needed when in state O and we see a GetX
776 out_msg.Destination.add(in_msg.RequestorMachId);
777 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
778 DEBUG_EXPR(out_msg.Address);
779 DEBUG_EXPR(out_msg.Destination);
780 DEBUG_EXPR(out_msg.DataBlk);
781 DEBUG_EXPR(out_msg.NumPendingExtAcks);
782 out_msg.MessageSize := MessageSizeType:Data;
783 }
784 }
785 }
786
787 // ForwardRequest -> L1
788 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data from cache to L1 requestor") {
789 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
790 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
791 out_msg.Address := address;
792 out_msg.Type := CoherenceResponseType:DATA;
793 out_msg.SenderMachId := machineID;
794 out_msg.Destination.add(in_msg.RequestorMachId);
795 DEBUG_EXPR(out_msg.Destination);
796 out_msg.DataBlk := getL2CacheEntry(address).DataBlk;
797 out_msg.MessageSize := MessageSizeType:Data;
798 }
799 }
800 }
801
802 // OTHER ACTIONS
803 action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
804 check_allocate(L2_TBEs);
805 L2_TBEs.allocate(address);
806 L2_TBEs[address].NumPendingIntAcks := 0; // default value
807 L2_TBEs[address].NumPendingExtAcks := 0; // default value
808 L2_TBEs[address].isPrefetch := false;
809 L2_TBEs[address].isThreeHop := false;
810 L2_TBEs[address].Forward_GetS_IDs.clear();
811 L2_TBEs[address].L1_GetS_IDs.clear();
812 L2_TBEs[address].validInvalidator := false;
813 L2_TBEs[address].validForwardedGetXId := false;
814 L2_TBEs[address].isInternalRequestOnly := false;
815 }
816
817 action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
818 L2_TBEs.deallocate(address);
819 }
820
821 action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
822 profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
823 }
824
825 action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
826 profileMsgDelay(2, forwardedRequestIntraChipL2Network_in.dequeue_getDelayCycles());
827 }
828
829 action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
830 profileMsgDelay(3, responseIntraChipL2Network_in.dequeue_getDelayCycles());
831 }
832
833 action(p_addNumberOfPendingExtAcks, "p", desc="Add number of pending acks to TBE") {
834 peek(responseIntraChipL2Network_in, ResponseMsg) {
835 DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
836 L2_TBEs[address].NumPendingExtAcks := L2_TBEs[address].NumPendingExtAcks + in_msg.NumPendingExtAcks;
837 DEBUG_EXPR(in_msg.NumPendingExtAcks);
838 DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
839 }
840 }
841
842 action(q_decrementNumberOfPendingExtAcks, "q", desc="Decrement number of pending ext invalidations by one") {
843 DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
844 L2_TBEs[address].NumPendingExtAcks := L2_TBEs[address].NumPendingExtAcks - 1;
845 DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
846 }
847
848 action(r_decrementNumberOfPendingIntAcks, "r", desc="Decrement number of pending int invalidations by one") {
849 DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
850 L2_TBEs[address].NumPendingIntAcks := L2_TBEs[address].NumPendingIntAcks - 1;
851 DEBUG_EXPR(L2_TBEs[address].NumPendingExtAcks);
852 }
853
854 action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
855 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
856 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
857 out_msg.Address := address;
858 out_msg.Type := CoherenceResponseType:ACK;
859 out_msg.SenderMachId := machineID;
860 out_msg.Destination.add(in_msg.RequestorMachId);
861 DEBUG_EXPR(out_msg.Destination);
862 out_msg.NumPendingExtAcks := 0;
863 out_msg.MessageSize := MessageSizeType:Control;
864 }
865 }
866 }
867
868 action(u_writeDataFromResponseQueueToL2Cache, "u", desc="Write data from response queue to cache") {
869 peek(responseIntraChipL2Network_in, ResponseMsg) {
870 getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
871 }
872 }
873
874 // FIXME - probably need to change this to a separate low-priority request queue
875 action(m_writeDataFromRequestQueueToL2Cache, "m", desc="Write data from request queue to cache") {
876 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
877 getL2CacheEntry(address).DataBlk := in_msg.DataBlk;
878 }
879 }
880
881 action(x_copyDataFromL2CacheToTBE, "x", desc="Copy data from cache to TBE") {
882 L2_TBEs[address].DataBlk := getL2CacheEntry(address).DataBlk;
883 }
884
885 action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
886 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
887 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
888 out_msg.Address := address;
889 out_msg.Type := CoherenceResponseType:DATA;
890 out_msg.SenderMachId := machineID;
891 out_msg.NumPendingExtAcks := in_msg.NumPendingExtAcks;
892 out_msg.Destination.add(in_msg.RequestorMachId);
893 out_msg.DataBlk := L2_TBEs[address].DataBlk;
894 DEBUG_EXPR(out_msg.Address);
895 DEBUG_EXPR(out_msg.Destination);
896 DEBUG_EXPR(out_msg.DataBlk);
897 DEBUG_EXPR(out_msg.NumPendingExtAcks);
898 out_msg.MessageSize := MessageSizeType:Data;
899 }
900 }
901 }
902
903 action(zz_sendAckToQueuedInvalidator, "\z", desc="Send ack to invalidator") {
904 if (L2_TBEs[address].validInvalidator) {
905 enqueue(responseIntraChipL2Network_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
906 out_msg.Address := address;
907 out_msg.Type := CoherenceResponseType:ACK;
908 out_msg.SenderMachId := machineID;
909 out_msg.Destination.add(L2_TBEs[address].InvalidatorID);
910 DEBUG_EXPR(out_msg.Destination);
911 out_msg.NumPendingExtAcks := 0;
912 out_msg.MessageSize := MessageSizeType:Control;
913 }
914 }
915 }
916
917 action(z_stall, "z", desc="Stall") {
918 }
919
920 action(yy_recordInvalidatorID, "\y", desc="Record Invalidator for future response") {
921 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
922 L2_TBEs[address].InvalidatorID := in_msg.RequestorMachId;
923 L2_TBEs[address].validInvalidator := true;
924 }
925 }
926
927 action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
928 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
929 L2_TBEs[address].Forward_GetS_IDs.add(in_msg.RequestorMachId);
930 }
931 }
932
933 action(ss_recordGetSL1ID, "\s", desc="Record forwarded L1 GetS for load response") {
934 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
935 L2_TBEs[address].L1_GetS_IDs.add(in_msg.RequestorMachId);
936 }
937 }
938
939 action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
940 peek(forwardedRequestIntraChipL2Network_in, RequestMsg) {
941 L2_TBEs[address].Forward_GetX_ID := in_msg.RequestorMachId;
942 L2_TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingExtAcks;
943 L2_TBEs[address].validForwardedGetXId := true;
944 }
945 }
946
947 action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
948 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
949 L2_TBEs[address].L1_GetX_ID := in_msg.RequestorMachId;
950 }
951 }
952
953 action(set_setMRU, "\set", desc="set the MRU entry") {
954 L2cacheMemory.setMRU(address);
955 }
956
957 action(bbb_setPendingIntAcksToSharers, "\bb", desc="Set number of pending acks equal to number of sharers") {
958 L2_TBEs[address].NumPendingIntAcks := L2cacheMemory[address].Sharers.count();
959 }
960
961 action(ddd_setPendingIntAcksToOne, "\dd", desc="Set number of pending acks equal to one") {
962 L2_TBEs[address].NumPendingIntAcks := 1;
963 }
964
965 action(ccc_setPendingIntAcksMinusOne, "\cc", desc="Set number of pending acks equal to number of sharers minus one") {
966 L2_TBEs[address].NumPendingIntAcks := L2cacheMemory[address].Sharers.count() - 1;
967 }
968
969 action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
970 if (L2cacheMemory.isTagPresent(address) == false) {
971 L2cacheMemory.allocate(address);
972 }
973 }
974
975 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
976 L2cacheMemory.deallocate(address);
977 }
978
979 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
980 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
981 //profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, L1CacheMachIDToProcessorNum(in_msg.RequestorMachId));
982 }
983 }
984
985 action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
986 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
987 profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
988 }
989 }
990
991 action(v_issueInvalidateIntL1copyRequest, "v", desc="invalidate the L1 M copy") {
992 enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
993 out_msg.Address := address;
994 out_msg.Type := CoherenceRequestType:INV;
995 out_msg.RequestorMachId := machineID;
996 out_msg.Destination := L2cacheMemory[address].Sharers;
997 out_msg.MessageSize := MessageSizeType:Control;
998 }
999 }
1000
1001 action(tt_issueSharedInvalidateIntL1copiesRequest, "\t", desc="invalidate all L1 S copies") {
1002 enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
1003 out_msg.Address := address;
1004 out_msg.Type := CoherenceRequestType:INV_S;
1005 out_msg.RequestorMachId := machineID;
1006 out_msg.Destination := L2cacheMemory[address].Sharers;
1007 out_msg.MessageSize := MessageSizeType:Control;
1008 }
1009 }
1010
1011 action(vv_issueInvalidateOtherIntL1copiesRequest, "\v", desc="invalidate other L1 copies not the local requestor") {
1012 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
1013 if ((L2cacheMemory[address].Sharers.count() > 1) || (L2cacheMemory[address].Sharers.isElement(in_msg.RequestorMachId) != true)) {
1014 enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
1015 out_msg.Address := address;
1016 out_msg.Type := CoherenceRequestType:INV_S;
1017 out_msg.RequestorMachId := machineID;
1018 out_msg.Destination := L2cacheMemory[address].Sharers;
1019 out_msg.Destination.remove(in_msg.RequestorMachId);
1020 out_msg.MessageSize := MessageSizeType:Control;
1021 }
1022 }
1023 }
1024 }
1025
1026 action(g_issueDownGradeIntL1copiesRequest, "g", desc="DownGrade L1 copy") {
1027 enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
1028 out_msg.Address := address;
1029 out_msg.Type := CoherenceRequestType:L1_DG;
1030 out_msg.RequestorMachId := machineID;
1031 out_msg.Destination := L2cacheMemory[address].Sharers;
1032 out_msg.MessageSize := MessageSizeType:Control;
1033 }
1034 }
1035
1036 action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
1037 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
1038 addSharer(address, in_msg.RequestorMachId);
1039 }
1040 }
1041
1042 action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
1043 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
1044 L2cacheMemory[address].Sharers.remove(in_msg.RequestorMachId);
1045 }
1046 }
1047
1048 action(aa_removeResponseSharer, "\a", desc="Remove L1 Response sharer from list") {
1049 peek(responseIntraChipL2Network_in, ResponseMsg) {
1050 L2cacheMemory[address].Sharers.remove(in_msg.SenderMachId);
1051 }
1052 }
1053
1054 action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
1055 peek(L1RequestIntraChipL2Network_in, RequestMsg) {
1056 L2cacheMemory[address].Sharers.clear();
1057 }
1058 }
1059
1060 //*****************************************************
1061 // TRANSITIONS
1062 //*****************************************************
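// Each transition below reads as transition(initial state(s), triggering
// event(s)[, final state]) { ordered list of actions }; when no final state
// is listed the machine remains in the initial state.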
1063
1064 //===============================================
1065 // STALLS
1066
1067 // Stalls L2 Replacement and L1 PUT for all transient states
1068 transition({L2_IS, L2_ISZ, L2_ISI, L2_IMV, L2_MV, L2_IM, L2_IMO, L2_IMI, L2_IMZ, L2_IMOI, L2_IMOZ,
1069 L2_SIV, L2_SIC,
1070 L2_MIV, L2_MIN, L2_MIC, L2_MIT, L2_MO, L2_MOIC, L2_MOICR, L2_MOZ,
1071 L2_OIV, L2_OIN, L2_OIC, L2_OMV, L2_OM},
1072 {L2_Replacement, L1_PUTX, L1_PUTX_last, L1_PUTS, L1_PUTS_last, L1_PUTX_old, L1_PUTS_old, }) {
1073 z_stall;
1074 }
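// Note: z_stall performs no action and does not pop the triggering queue, so
// a stalled L2_Replacement or L1 PUT is presumably re-evaluated once the
// transient state resolves.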
1075
1076 //===============================================
1077 // old L1_PUT requests
1078
1079 transition({L2_NP, L2_I, L2_S, L2_SS, L2_M, L2_MT, L2_O, L2_SO}, {L1_PUTX_old, L1_PUTS_old}) {
1080 w_sendPutAckToL1Cache;
1081 jj_popL1RequestQueue;
1082 }
1083
1084 //===============================================
1085 // BASE STATE - I
1086
1087 // Transitions from I (Idle)
1088 transition({L2_NP,L2_I}, L2_Replacement) {
1089 rr_deallocateL2CacheBlock;
1090 }
1091
1092 transition({L2_NP,L2_I}, L2_INV) { // could see an invalidate from the directory, but not Forwards
1093 t_sendAckToInvalidator;
1094 l_popForwardedRequestQueue;
1095 }
1096
1097 transition({L2_NP,L2_I}, L1_GETS, L2_IS) {
1098 qq_allocateL2CacheBlock;
1099 ll_clearSharers;
1100 nn_addSharer;
1101 i_allocateTBE;
1102 ss_recordGetSL1ID;
1103 a_issueGETS;
1104 uu_profileMiss;
1105 jj_popL1RequestQueue;
1106 }
1107
1108 transition({L2_NP,L2_I}, L1_GET_INSTR, L2_IS) {
1109 qq_allocateL2CacheBlock;
1110 ll_clearSharers;
1111 nn_addSharer;
1112 i_allocateTBE;
1113 ss_recordGetSL1ID;
1114 f_issueGETINSTR;
1115 uu_profileMiss;
1116 jj_popL1RequestQueue;
1117 }
1118
1119 transition({L2_NP,L2_I}, {L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}, L2_IM) { // UPGRADE possible because L2_Replacement has higher priority
1120 qq_allocateL2CacheBlock;
1121 ll_clearSharers;
1122 nn_addSharer;
1123 i_allocateTBE;
1124 xx_recordGetXL1ID;
1125 b_issueGETX;
1126 uu_profileMiss;
1127 jj_popL1RequestQueue;
1128 }
1129
1130 // Transitions from L2_IS
1131 // could see L2_INVs or more L1 requests
1132 transition(L2_IS, L2_INV, L2_ISI) { // could see an invalidate from the directory, but not Forwards
1133 t_sendAckToInvalidator;
1134 l_popForwardedRequestQueue;
1135 }
1136
1137 transition(L2_IS, Data_ext_ack_0, L2_SS) {
1138 u_writeDataFromResponseQueueToL2Cache;
1139 h_issueLoadHit;
1140 c_finalAckToDirIfNeeded;
1141 s_deallocateTBE;
1142 o_popIncomingResponseQueue;
1143 }
1144
1145 transition(L2_IS, {L1_GETS,L1_GET_INSTR}) {
1146 set_setMRU;
1147 ww_profileMissNoDir;
1148 nn_addSharer;
1149 ss_recordGetSL1ID;
1150 jj_popL1RequestQueue;
1151 }
1152
1153 transition(L2_IS, L1_GETX, L2_ISZ) { // don't go there, just go to stall state
1154 z_stall;
1155 }
1156
1157 // Transitions from L2_ISZ
1158 // could see L2_INVs or more L1 requests
1159 // stall all L1 requests, wait for data
1160 transition(L2_ISZ, L2_INV, L2_ISI) { // could see an invalidate from the directory, but not Forwards
1161 t_sendAckToInvalidator;
1162 l_popForwardedRequestQueue;
1163 }
1164
1165 transition(L2_ISZ, Data_ext_ack_0, L2_SS) {
1166 u_writeDataFromResponseQueueToL2Cache;
1167 h_issueLoadHit;
1168 c_finalAckToDirIfNeeded;
1169 s_deallocateTBE;
1170 o_popIncomingResponseQueue;
1171 }
1172
1173 transition(L2_ISZ, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
1174 z_stall;
1175 }
1176
1177 // Transitions from L2_ISI, already sent the invalidate ack so can immediately go to I
1178 // - in ISI, could get data from the Proc whose GETX caused INV to go from IS to ISI
1179 // or, could get data from Dir if Dir's data lost race to Dir's INV
1180 // or, could get data from Dir, if my GETS took forever to get to Dir, and the GETX
1181 // processor already wrote it back
1182 transition(L2_ISI, Data_ext_ack_0, L2_I) {
1183 u_writeDataFromResponseQueueToL2Cache;
1184 oo_issueLoadHitInv;
1185 c_finalAckToDirIfNeeded;
1186 s_deallocateTBE;
1187 o_popIncomingResponseQueue;
1188 }
1189
1190 transition(L2_ISI, L2_INV) { // could see an invalidate from the directory, but not Forwards
1191 t_sendAckToInvalidator;
1192 l_popForwardedRequestQueue;
1193 }
1194
1195 transition(L2_ISI, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
1196 z_stall;
1197 }
1198
1199 // Transitions from L2_IMV, waiting for int_acks
1200 // currently stall all requests
1201 // could see forwards and/or more L1 requests
1202 transition(L2_IMV, L2_INV) { // could see an invalidate for SS
1203 yy_recordInvalidatorID;
1204 l_popForwardedRequestQueue;
1205 }
1206
1207 // stall all Forwarded requests
1208 transition(L2_IMV, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
1209 z_stall;
1210 }
1211
1212 // stall all L1 requests
1213 transition(L2_IMV, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
1214 z_stall;
1215 }
1216
1217 transition(L2_IMV, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MV) {
1218 u_writeDataFromResponseQueueToL2Cache;
1219 c_finalAckToDirIfNeeded;
1220 o_popIncomingResponseQueue;
1221 }
1222
1223 transition(L2_IMV, Data_ext_ack_not_0) {
1224 u_writeDataFromResponseQueueToL2Cache;
1225 p_addNumberOfPendingExtAcks;
1226 mm_rememberIfFinalAckNeeded;
1227 o_popIncomingResponseQueue;
1228 }
1229
1230 transition(L2_IMV, Proc_ext_ack) {
1231 q_decrementNumberOfPendingExtAcks;
1232 o_popIncomingResponseQueue;
1233 }
1234
1235 transition(L2_IMV, Proc_last_ext_ack, L2_MV) {
1236 n_sendFinalAckIfThreeHop;
1237 o_popIncomingResponseQueue;
1238 }
1239
1240 transition(L2_IMV, Proc_int_ack) {
1241 aa_removeResponseSharer;
1242 r_decrementNumberOfPendingIntAcks;
1243 o_popIncomingResponseQueue;
1244 }
1245
1246 transition(L2_IMV, Proc_last_int_ack, L2_IM) {
1247 aa_removeResponseSharer;
1248 r_decrementNumberOfPendingIntAcks;
1249 o_popIncomingResponseQueue;
1250 zz_sendAckToQueuedInvalidator;
1251 }
1252
1253 // Transitions from L2_MV, waiting for int_acks
1254 // external world gave us write permission
1255
1256 // stall all Forwarded requests
1257 transition(L2_MV, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
1258 z_stall;
1259 }
1260
1261 // stall all L1 requests
1262 transition(L2_MV, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
1263 z_stall;
1264 }
1265
1266 transition(L2_MV, Proc_int_ack) {
1267 aa_removeResponseSharer;
1268 r_decrementNumberOfPendingIntAcks;
1269 o_popIncomingResponseQueue;
1270 }
1271
1272 transition(L2_MV, Proc_last_int_ack, L2_MT) {
1273 aa_removeResponseSharer;
1274 r_decrementNumberOfPendingIntAcks;
1275 hh_issueStoreHit;
1276 s_deallocateTBE;
1277 o_popIncomingResponseQueue;
1278 }
1279
1280 // Transitions from L2_IM, waiting for external data before going to MT state
1281 // could see forwards and/or more L1 requests
1282 transition(L2_IM, L2_INV) { // could see an invalidate from the directory (earlier epoch)
1283 t_sendAckToInvalidator;
1284 l_popForwardedRequestQueue;
1285 }
1286
1287 transition(L2_IM, {Forwarded_GETS,Forwarded_GET_INSTR}, L2_IMO) { // could see Forwards, if directory responses get out-of-order
1288 dd_recordGetSForwardID;
1289 l_popForwardedRequestQueue;
1290 }
1291
1292 transition(L2_IM, {L1_GETS,L1_GET_INSTR}, L2_IMO) {
1293 set_setMRU;
1294 ww_profileMissNoDir;
1295 nn_addSharer;
1296 ss_recordGetSL1ID;
1297 jj_popL1RequestQueue;
1298 }
1299
1300 transition(L2_IM, Forwarded_GETX, L2_IMI) { // could see Forwards, if directory requests get ahead of responses
1301 ii_recordGetXForwardID;
1302 l_popForwardedRequestQueue;
1303 }
1304
1305 transition(L2_IM, L1_GETX, L2_IMZ) { // don't go there, just go to stall state
1306 z_stall;
1307 }
1308
1309 transition(L2_IM, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MT) {
1310 u_writeDataFromResponseQueueToL2Cache;
1311 hh_issueStoreHit;
1312 c_finalAckToDirIfNeeded;
1313 s_deallocateTBE;
1314 o_popIncomingResponseQueue;
1315 }
1316
1317 transition(L2_IM, Data_ext_ack_not_0) {
1318 u_writeDataFromResponseQueueToL2Cache;
1319 p_addNumberOfPendingExtAcks;
1320 mm_rememberIfFinalAckNeeded;
1321 o_popIncomingResponseQueue;
1322 }
1323
1324 transition(L2_IM, Proc_ext_ack) {
1325 q_decrementNumberOfPendingExtAcks;
1326 o_popIncomingResponseQueue;
1327 }
1328
1329 transition(L2_IM, Proc_last_ext_ack, L2_MT) {
1330 hh_issueStoreHit;
1331 n_sendFinalAckIfThreeHop;
1332 s_deallocateTBE;
1333 o_popIncomingResponseQueue;
1334 }
1335
1336 // transitions from L2_IMO
1337 transition(L2_IMO, L2_INV) { // could see an invalidate from the directory (earlier epoch)
1338 t_sendAckToInvalidator;
1339 l_popForwardedRequestQueue;
1340 }
1341
1342 transition(L2_IMO, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
1343 dd_recordGetSForwardID;
1344 l_popForwardedRequestQueue;
1345 }
1346
1347 transition(L2_IMO, Forwarded_GETX, L2_IMOI) { // could see Forwards
1348 ii_recordGetXForwardID;
1349 l_popForwardedRequestQueue;
1350 }
1351
1352 transition(L2_IMO, {L1_GETS,L1_GET_INSTR}) {
1353 set_setMRU;
1354 ww_profileMissNoDir;
1355 nn_addSharer;
1356 ss_recordGetSL1ID;
1357 jj_popL1RequestQueue;
1358 }
1359
1360 transition(L2_IMO, L1_GETX, L2_IMOZ) {
1361 z_stall;
1362 }
1363
1364 transition(L2_IMO, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MO) {
1365 u_writeDataFromResponseQueueToL2Cache;
1366 cc_issueStoreHitDG;
1367 ddd_setPendingIntAcksToOne;
1368 c_finalAckToDirIfNeeded;
1369 o_popIncomingResponseQueue;
1370 }
1371
1372 transition(L2_IMO, Data_ext_ack_not_0) {
1373 u_writeDataFromResponseQueueToL2Cache;
1374 p_addNumberOfPendingExtAcks;
1375 mm_rememberIfFinalAckNeeded;
1376 o_popIncomingResponseQueue;
1377 }
1378
1379 transition(L2_IMO, Proc_ext_ack) {
1380 q_decrementNumberOfPendingExtAcks;
1381 o_popIncomingResponseQueue;
1382 }
1383
1384 transition(L2_IMO, Proc_last_ext_ack, L2_MO) {
1385 n_sendFinalAckIfThreeHop;
1386 cc_issueStoreHitDG;
1387 ddd_setPendingIntAcksToOne;
1388 o_popIncomingResponseQueue;
1389 }
1390
1391 // transitions from L2_IMI
1392 // the directory put us in this state so it should tell us nothing (i.e. don't worry about INV or Forwards)
1393 // stall all L1 requests
1394 transition(L2_IMI, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MIC) {
1395 u_writeDataFromResponseQueueToL2Cache;
1396 pp_issueStoreHitInv;
1397 ddd_setPendingIntAcksToOne;
1398 c_finalAckToDirIfNeeded;
1399 o_popIncomingResponseQueue;
1400 }
1401
1402 transition(L2_IMI, Data_ext_ack_not_0) {
1403 u_writeDataFromResponseQueueToL2Cache;
1404 p_addNumberOfPendingExtAcks;
1405 mm_rememberIfFinalAckNeeded;
1406 o_popIncomingResponseQueue;
1407 }
1408
1409 transition(L2_IMI, Proc_ext_ack) {
1410 q_decrementNumberOfPendingExtAcks;
1411 o_popIncomingResponseQueue;
1412 }
1413
1414 transition(L2_IMI, Proc_last_ext_ack, L2_MIC) {
1415 n_sendFinalAckIfThreeHop;
1416 pp_issueStoreHitInv;
1417 ddd_setPendingIntAcksToOne;
1418 o_popIncomingResponseQueue;
1419 }
1420
1421 transition(L2_IMI, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
1422 z_stall;
1423 }
1424
1425   // transitions from L2_IMZ
1426 // just wait for all acks and data
1427 // stall on all requests
1428   // NOTE: a possible performance option would be to go to the M state here instead of MT
1429 transition(L2_IMZ, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MT) {
1430 u_writeDataFromResponseQueueToL2Cache;
1431 hh_issueStoreHit;
1432 c_finalAckToDirIfNeeded;
1433 s_deallocateTBE;
1434 o_popIncomingResponseQueue;
1435 }
1436
1437 transition(L2_IMZ, Data_ext_ack_not_0) {
1438 u_writeDataFromResponseQueueToL2Cache;
1439 p_addNumberOfPendingExtAcks;
1440 mm_rememberIfFinalAckNeeded;
1441 o_popIncomingResponseQueue;
1442 }
1443
1444 transition(L2_IMZ, Proc_ext_ack) {
1445 q_decrementNumberOfPendingExtAcks;
1446 o_popIncomingResponseQueue;
1447 }
1448
1449 transition(L2_IMZ, Proc_last_ext_ack, L2_MT) {
1450 hh_issueStoreHit;
1451 n_sendFinalAckIfThreeHop;
1452 s_deallocateTBE;
1453 o_popIncomingResponseQueue;
1454 }
1455
1456 transition(L2_IMZ, L2_INV) { // could see an invalidate from the directory (earlier epoch)
1457 t_sendAckToInvalidator;
1458 l_popForwardedRequestQueue;
1459 }
1460
1461 transition(L2_IMZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
1462 z_stall;
1463 }
1464
1465 // transitions from L2_IMOI
1466 // the directory put us in this state so it should tell us nothing (i.e. don't worry about INV or Forwards)
1467 // stall all L1 requests
1468 transition(L2_IMOI, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MOICR) {
1469 u_writeDataFromResponseQueueToL2Cache;
1470 pp_issueStoreHitInv;
1471 ddd_setPendingIntAcksToOne;
1472 c_finalAckToDirIfNeeded;
1473 o_popIncomingResponseQueue;
1474 }
1475
1476 transition(L2_IMOI, Data_ext_ack_not_0) {
1477 u_writeDataFromResponseQueueToL2Cache;
1478 p_addNumberOfPendingExtAcks;
1479 mm_rememberIfFinalAckNeeded;
1480 o_popIncomingResponseQueue;
1481 }
1482
1483 transition(L2_IMOI, Proc_ext_ack) {
1484 q_decrementNumberOfPendingExtAcks;
1485 o_popIncomingResponseQueue;
1486 }
1487
1488 transition(L2_IMOI, Proc_last_ext_ack, L2_MOICR) {
1489 n_sendFinalAckIfThreeHop;
1490 pp_issueStoreHitInv;
1491 ddd_setPendingIntAcksToOne;
1492 o_popIncomingResponseQueue;
1493 }
1494
1495 transition(L2_IMOI, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
1496 z_stall;
1497 }
1498
1499 // transitions from L2_IMOZ
1500 // just wait for all acks and data
1501 // stall on all requests
1502 transition(L2_IMOZ, L2_INV) { // could see an invalidate from the directory (earlier epoch)
1503 t_sendAckToInvalidator;
1504 l_popForwardedRequestQueue;
1505 }
1506
1507 transition(L2_IMOZ, {Data_ext_ack_0, Data_ext_ack_not_0_last}, L2_MOZ) {
1508 u_writeDataFromResponseQueueToL2Cache;
1509 cc_issueStoreHitDG;
1510 ddd_setPendingIntAcksToOne;
1511 c_finalAckToDirIfNeeded;
1512 o_popIncomingResponseQueue;
1513 }
1514
1515 transition(L2_IMOZ, Data_ext_ack_not_0) {
1516 u_writeDataFromResponseQueueToL2Cache;
1517 p_addNumberOfPendingExtAcks;
1518 mm_rememberIfFinalAckNeeded;
1519 o_popIncomingResponseQueue;
1520 }
1521
1522 transition(L2_IMOZ, Proc_ext_ack) {
1523 q_decrementNumberOfPendingExtAcks;
1524 o_popIncomingResponseQueue;
1525 }
1526
1527 transition(L2_IMOZ, Proc_last_ext_ack, L2_MOZ) {
1528 cc_issueStoreHitDG;
1529 ddd_setPendingIntAcksToOne;
1530 n_sendFinalAckIfThreeHop;
1531 o_popIncomingResponseQueue;
1532 }
1533
1534 // stall on all requests
1535 transition(L2_IMOZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
1536 z_stall;
1537 }
1538
1539 // ===============================================
1540 // BASE STATE - S
1541 // Transitions from S, no L1 copies
1542 transition(L2_S, L2_Replacement, L2_I) {
1543 rr_deallocateL2CacheBlock;
1544 }
1545
1546 transition(L2_S, L2_INV, L2_I) { // could see an invalidate from the directory, but not Forwards
1547 t_sendAckToInvalidator;
1548 l_popForwardedRequestQueue;
1549 }
1550
1551 transition(L2_S, {L1_GETS, L1_GET_INSTR}, L2_SS) {
1552 set_setMRU;
1553 ww_profileMissNoDir;
1554 nn_addSharer;
1555 k_dataFromL2CacheToL1Requestor;
1556 jj_popL1RequestQueue;
1557 }
1558
1559 transition(L2_S, L1_GETX, L2_IM) {
1560 set_setMRU;
1561 nn_addSharer;
1562 i_allocateTBE;
1563 xx_recordGetXL1ID;
1564 b_issueGETX;
1565 uu_profileMiss;
1566 jj_popL1RequestQueue;
1567 }
1568
1569 // BASE STATE - SS
1570 // Transitions from SS, L1 copies
1571 transition(L2_SS, L2_Replacement, L2_SIV) {
1572 i_allocateTBE; // for internal request
1573 bbb_setPendingIntAcksToSharers;
1574 tt_issueSharedInvalidateIntL1copiesRequest;
1575 }
1576
1577 transition(L2_SS, L2_INV, L2_SIC) {
1578 i_allocateTBE; // for internal request
1579 yy_recordInvalidatorID;
1580 bbb_setPendingIntAcksToSharers;
1581 tt_issueSharedInvalidateIntL1copiesRequest;
1582 l_popForwardedRequestQueue;
1583 }
1584
1585 transition(L2_SS, {L1_GETS, L1_GET_INSTR}) {
1586 set_setMRU;
1587 ww_profileMissNoDir;
1588 nn_addSharer;
1589 k_dataFromL2CacheToL1Requestor;
1590 jj_popL1RequestQueue;
1591 }
1592
1593 transition(L2_SS, L1_UPGRADE_no_others, L2_IM) {
1594 set_setMRU;
1595 i_allocateTBE; // for both ext. and int.
1596 xx_recordGetXL1ID;
1597 b_issueGETX; // for external
1598 uu_profileMiss;
1599 jj_popL1RequestQueue;
1600 }
1601
1602 transition(L2_SS, L1_UPGRADE, L2_IMV) {
1603 set_setMRU;
1604 i_allocateTBE; // for both ext. and int.
1605 xx_recordGetXL1ID;
1606 ccc_setPendingIntAcksMinusOne;
1607 vv_issueInvalidateOtherIntL1copiesRequest; // for internal
1608 b_issueGETX; // for external
1609 uu_profileMiss;
1610 jj_popL1RequestQueue;
1611 }
1612
1613 transition(L2_SS, L1_GETX, L2_IMV) {
1614 set_setMRU;
1615 i_allocateTBE; // for both ext. and int.
1616 xx_recordGetXL1ID;
1617 bbb_setPendingIntAcksToSharers;
1618 vv_issueInvalidateOtherIntL1copiesRequest; // for internal
1619 nn_addSharer;
1620 b_issueGETX; // for external
1621 uu_profileMiss;
1622 jj_popL1RequestQueue;
1623 }
1624
1625 transition(L2_SS, L1_PUTS) {
1626 ww_profileMissNoDir;
1627 w_sendPutAckToL1Cache;
1628 kk_removeRequestSharer;
1629 jj_popL1RequestQueue;
1630 }
1631
1632 transition(L2_SS, L1_PUTS_last, L2_S) {
1633 ww_profileMissNoDir;
1634 w_sendPutAckToL1Cache;
1635 kk_removeRequestSharer;
1636 jj_popL1RequestQueue;
1637 }
1638
1639 // Transitions from SIC - Initiated by an invalidate
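  // Proc_int_ack / Proc_last_int_ack are invalidation acks from the on-chip L1
  // sharers; the queued directory invalidate is acked only after the last one.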
1640 transition(L2_SIC, Proc_int_ack) {
1641 aa_removeResponseSharer;
1642 r_decrementNumberOfPendingIntAcks;
1643 o_popIncomingResponseQueue;
1644 }
1645
1646 transition(L2_SIC, Proc_last_int_ack, L2_I) {
1647 aa_removeResponseSharer;
1648 r_decrementNumberOfPendingIntAcks;
1649 o_popIncomingResponseQueue;
1650 zz_sendAckToQueuedInvalidator;
1651 s_deallocateTBE;
1652 }
1653
1654 transition(L2_SIC, L2_INV) { // could see an invalidate from the directory, but not Forwards
1655 l_popForwardedRequestQueue; // ignore: already know an ack must be sent to the directory
1656 }
1657
1658 transition(L2_SIC, {L1_GETS, L1_GET_INSTR, L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX}) { // stall on all L1 requests
1659 z_stall;
1660 }
1661
1662   // Transitions from SIV - initiated by an L2_Replacement
1663 transition(L2_SIV, Proc_int_ack) {
1664 aa_removeResponseSharer;
1665 r_decrementNumberOfPendingIntAcks;
1666 o_popIncomingResponseQueue;
1667 }
1668
1669 transition(L2_SIV, Proc_last_int_ack, L2_I) {
1670 aa_removeResponseSharer;
1671 r_decrementNumberOfPendingIntAcks;
1672 o_popIncomingResponseQueue;
1673 s_deallocateTBE;
1674 rr_deallocateL2CacheBlock;
1675 }
1676
1677 transition(L2_SIV, L2_INV) { // could see an invalidate from the directory, but not Forwards
1678     z_stall; // guaranteed to receive all acks, moving the state to I where the L2_INV can be handled
1679 }
1680
1681 transition(L2_SIV, {L1_GETS, L1_GET_INSTR, L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX}) { // stall on all L1 requests
1682 z_stall;
1683 }
1684
1685 // ===============================================
1686 // BASE STATE - M
1687 // Transitions from M, no L1 copies
1688 transition(L2_M, L2_Replacement, L2_MIN) {
1689 i_allocateTBE;
1690 d_issuePUTX;
1691 x_copyDataFromL2CacheToTBE;
1692 rr_deallocateL2CacheBlock;
1693 }
1694
1695 transition(L2_M, {Forwarded_GETS,Forwarded_GET_INSTR}, L2_O) { // can see forwards, not inv
1696 e_dataFromL2CacheToL2Requestor;
1697 l_popForwardedRequestQueue;
1698 }
1699
1700 transition(L2_M, Forwarded_GETX, L2_I) { // can see forwards, not inv
1701 e_dataFromL2CacheToL2Requestor;
1702 l_popForwardedRequestQueue;
1703 }
1704
1705 transition(L2_M, {L1_GETS, L1_GET_INSTR}, L2_SO) { // FIXME FOR BETTER PERFORMANCE - an E state would be nice here
1706 set_setMRU;
1707 ww_profileMissNoDir;
1708 nn_addSharer;
1709 k_dataFromL2CacheToL1Requestor;
1710 jj_popL1RequestQueue;
1711 }
1712
1713 transition(L2_M, L1_GETX, L2_MT) {
1714 set_setMRU;
1715 ww_profileMissNoDir;
1716 nn_addSharer;
1717 k_dataFromL2CacheToL1Requestor;
1718 jj_popL1RequestQueue;
1719 }
1720
1721 // BASE STATE - MT
1722   // Transitions from MT, an L1 holds the block in M
1723 transition(L2_MT, L2_Replacement, L2_MIV) {
1724 i_allocateTBE;
1725 bbb_setPendingIntAcksToSharers;
1726 v_issueInvalidateIntL1copyRequest;
1727 }
1728
1729 transition(L2_MT, {Forwarded_GETS, Forwarded_GET_INSTR}, L2_MO) { // can see forwards, not inv
1730 i_allocateTBE;
1731 bbb_setPendingIntAcksToSharers;
1732 g_issueDownGradeIntL1copiesRequest;
1733 dd_recordGetSForwardID;
1734 l_popForwardedRequestQueue;
1735 }
1736
1737 transition(L2_MT, {L1_GETS, L1_GET_INSTR}, L2_MO) {
1738 set_setMRU;
1739 ww_profileMissNoDir;
1740 i_allocateTBE;
1741 bbb_setPendingIntAcksToSharers;
1742 g_issueDownGradeIntL1copiesRequest;
1743 ss_recordGetSL1ID;
1744 nn_addSharer;
1745 jj_popL1RequestQueue;
1746 }
1747
1748 transition(L2_MT, Forwarded_GETX, L2_MIC) { // can see forwards, not inv
1749 i_allocateTBE;
1750 bbb_setPendingIntAcksToSharers;
1751 v_issueInvalidateIntL1copyRequest;
1752 ii_recordGetXForwardID;
1753 l_popForwardedRequestQueue;
1754 }
1755
1756 transition(L2_MT, L1_GETX, L2_MIT) {
1757 set_setMRU;
1758 ww_profileMissNoDir;
1759 i_allocateTBE;
1760 bbb_setPendingIntAcksToSharers;
1761 v_issueInvalidateIntL1copyRequest;
1762 nn_addSharer;
1763 xx_recordGetXL1ID;
1764 jj_popL1RequestQueue;
1765 }
1766
1767 transition(L2_MT, L1_PUTX_last, L2_M) {
1768 ww_profileMissNoDir;
1769 w_sendPutAckToL1Cache;
1770 kk_removeRequestSharer;
1771 m_writeDataFromRequestQueueToL2Cache;
1772 jj_popL1RequestQueue;
1773 }
1774
1775 // Transitions from L2_MIV, waiting for local L1 response
1776 transition(L2_MIV, Data_int_ack, L2_MIN) {
1777 aa_removeResponseSharer;
1778 u_writeDataFromResponseQueueToL2Cache;
1779 bb_dataFromL2CacheToGetSForwardIDs; // likely won't send any messages
1780 gg_dataFromL2CacheToGetXForwardID; // likely won't send any messages
1781 d_issuePUTX;
1782 x_copyDataFromL2CacheToTBE;
1783 rr_deallocateL2CacheBlock;
1784 o_popIncomingResponseQueue;
1785 }
1786
1787 transition(L2_MIV, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
1788 dd_recordGetSForwardID;
1789 l_popForwardedRequestQueue;
1790 }
1791
1792 transition(L2_MIV, Forwarded_GETX) { // could see Forwards
1793 ii_recordGetXForwardID;
1794 l_popForwardedRequestQueue;
1795 }
1796
1797 transition(L2_MIV, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall on all L1 requests
1798 z_stall;
1799 }
1800
1801 // Transitions from L2_MIN, waiting for directory ack
1802 transition(L2_MIN, {Forwarded_GETS,Forwarded_GET_INSTR}) { // could see Forwards
1803 y_dataFromTBEToRequestor;
1804 l_popForwardedRequestQueue;
1805 }
1806
1807 transition(L2_MIN, Forwarded_GETX) { // could see Forwards
1808 y_dataFromTBEToRequestor;
1809 l_popForwardedRequestQueue;
1810 }
1811
1812 transition(L2_MIN, Dir_WB_ack, L2_I) {
1813 s_deallocateTBE;
1814 l_popForwardedRequestQueue;
1815 }
1816
1817 transition(L2_MIN, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
1818 z_stall;
1819 }
1820
1821 // Transitions from L2_MIC, waiting for local L1 response
1822 // Directory put us in this state with a forwarded GetX
1823   // therefore we shouldn't see any more forwards
1824 // we stall on all L1 requests
1825 transition(L2_MIC, Data_int_ack, L2_I) {
1826 aa_removeResponseSharer;
1827 u_writeDataFromResponseQueueToL2Cache;
1828 gg_dataFromL2CacheToGetXForwardID;
1829 s_deallocateTBE;
1830 o_popIncomingResponseQueue;
1831 }
1832
1833 transition(L2_MIC, {L1_GETS, L1_GET_INSTR, L1_GETX}) { // stall all L1 requests
1834 z_stall;
1835 }
1836
1837 // Transitions from L2_MIT, waiting for local L1 response
1838   // A local L1 request put us in this state, so any request is possible
1839   // we currently stall all requests because of the ugly recursive paths they could lead us down
1840   // removing some of the blocking here could have major performance benefits,
1841   // however one must be careful not to violate cache coherence
1842 transition(L2_MIT, Data_int_ack, L2_MT) {
1843 aa_removeResponseSharer;
1844 u_writeDataFromResponseQueueToL2Cache;
1845 hh_issueStoreHit; // internal requestor
1846 s_deallocateTBE;
1847 o_popIncomingResponseQueue;
1848 }
1849
1850 // stall all requests
1851 transition(L2_MIT, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX}) {
1852 z_stall;
1853 }
1854
1855   // Transition from L2_MO, waiting for local L1 data response
1856   // a GetS request put us in this state
1857   // must stall if we get a GETX request
1858 transition(L2_MO, Data_int_ack, L2_SO) {
1859 u_writeDataFromResponseQueueToL2Cache;
1860 ee_dataFromL2CacheToGetSIDs; // could be an internal or external requestor
1861 s_deallocateTBE;
1862 o_popIncomingResponseQueue;
1863 }
1864
1865 transition(L2_MO, {Forwarded_GETS, Forwarded_GET_INSTR}) { // can see forwards, not inv
1866 dd_recordGetSForwardID;
1867 l_popForwardedRequestQueue;
1868 }
1869
1870 transition(L2_MO, {L1_GETS, L1_GET_INSTR}) {
1871 set_setMRU;
1872 ww_profileMissNoDir;
1873 nn_addSharer;
1874 ss_recordGetSL1ID;
1875 jj_popL1RequestQueue;
1876 }
1877
1878 transition(L2_MO, Forwarded_GETX, L2_MOIC) { // can see forwards, not inv
1879 ii_recordGetXForwardID;
1880 l_popForwardedRequestQueue;
1881 }
1882
1883 transition(L2_MO, {L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}, L2_MOZ) { // don't go there, just go to a stall state
1884 z_stall;
1885 }
1886
1887   // Transition from L2_MOIC
1888   // a Forwarded_GETX put us here so we should not see any more forwards
1889   // stall on all L1 requests; once data is received, send the new data to all queued-up L1 sharers,
1890   // then immediately send an invalidate request to those new L1 shared copies
1891 //
1892 // KEY DIFFERENCE: L2_MOICR assumes the L1 data responder moved to I state and removes the sharer,
1893 // while L2_MOIC assumes the L1 data responder moved to S state and doesn't remove the sharer
1894 transition(L2_MOIC, Data_int_ack, L2_OIC) { // need only one ack
1895 u_writeDataFromResponseQueueToL2Cache;
1896 ee_dataFromL2CacheToGetSIDs;
1897 bbb_setPendingIntAcksToSharers;
1898 tt_issueSharedInvalidateIntL1copiesRequest;
1899 o_popIncomingResponseQueue;
1900 }
1901
1902 transition(L2_MOIC, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
1903 z_stall;
1904 }
1905
1906   // Transition from L2_MOICR
1907   // a Forwarded_GETX put us here so we should not see any more forwards
1908   // stall on all L1 requests; once data is received, send the new data to all queued-up L1 sharers,
1909   // then immediately send an invalidate request to those new L1 shared copies
1910 //
1911 // KEY DIFFERENCE: L2_MOICR assumes the L1 data responder moved to I state and removes the sharer,
1912 // while L2_MOIC assumes the L1 data responder moved to S state and doesn't remove the sharer
1913 transition(L2_MOICR, Data_int_ack, L2_OIC) { // need only one ack
1914 aa_removeResponseSharer;
1915 u_writeDataFromResponseQueueToL2Cache;
1916 ee_dataFromL2CacheToGetSIDs;
1917 bbb_setPendingIntAcksToSharers;
1918 tt_issueSharedInvalidateIntL1copiesRequest;
1919 o_popIncomingResponseQueue;
1920 }
1921
1922 transition(L2_MOICR, {L1_GETS, L1_GET_INSTR, L1_GETX}) {
1923 z_stall;
1924 }
1925
1926 // L2_MOZ
1927 // simply wait on data
1928 // stall on everything
1929 transition(L2_MOZ, Data_int_ack, L2_SO) {
1930 u_writeDataFromResponseQueueToL2Cache;
1931 ee_dataFromL2CacheToGetSIDs; // could be an internal or external requestor
1932 s_deallocateTBE;
1933 o_popIncomingResponseQueue;
1934 }
1935
1936 // stall everything
1937 transition(L2_MOZ, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX, L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE, L1_UPGRADE_no_others}) {
1938 z_stall;
1939 }
1940
1941 // ===============================================
1942 // BASE STATE - O
1943   // Transitions from L2_O, block cached only at the L2 on this chip (no L1 copies)
1944 transition(L2_O, L2_Replacement, L2_OIN){
1945 i_allocateTBE;
1946 x_copyDataFromL2CacheToTBE;
1947 d_issuePUTX;
1948 rr_deallocateL2CacheBlock;
1949 }
1950
1951 transition(L2_O, {Forwarded_GETS,Forwarded_GET_INSTR}) {
1952 e_dataFromL2CacheToL2Requestor;
1953 l_popForwardedRequestQueue;
1954 }
1955
1956 transition(L2_O, Forwarded_GETX, L2_I) {
1957 e_dataFromL2CacheToL2Requestor;
1958 l_popForwardedRequestQueue;
1959 }
1960
1961 transition(L2_O, {L1_GETS, L1_GET_INSTR}, L2_SO) {
1962 set_setMRU;
1963 ww_profileMissNoDir;
1964 nn_addSharer;
1965 k_dataFromL2CacheToL1Requestor;
1966 jj_popL1RequestQueue;
1967 }
1968
1969 transition(L2_O, L1_GETX, L2_OM) {
1970 set_setMRU;
1971 nn_addSharer;
1972 i_allocateTBE;
1973 xx_recordGetXL1ID;
1974 b_issueGETX;
1975 uu_profileMiss;
1976 jj_popL1RequestQueue;
1977 }
1978
1979 // BASE STATE - SO
1980   // Transitions from L2_SO, valid L1 cached copies also exist
1981 transition(L2_SO, L2_Replacement, L2_OIV){
1982 i_allocateTBE;
1983 x_copyDataFromL2CacheToTBE;
1984 bbb_setPendingIntAcksToSharers;
1985 tt_issueSharedInvalidateIntL1copiesRequest;
1986 }
1987
1988 transition(L2_SO, {Forwarded_GETS,Forwarded_GET_INSTR}) {
1989 e_dataFromL2CacheToL2Requestor;
1990 l_popForwardedRequestQueue;
1991 }
1992
1993 transition(L2_SO, Forwarded_GETX, L2_OIC) {
1994 i_allocateTBE;
1995 bbb_setPendingIntAcksToSharers;
1996 ii_recordGetXForwardID;
1997 tt_issueSharedInvalidateIntL1copiesRequest;
1998 l_popForwardedRequestQueue;
1999 }
2000
2001 transition(L2_SO, {L1_GETS, L1_GET_INSTR}) {
2002 set_setMRU;
2003 ww_profileMissNoDir;
2004 nn_addSharer;
2005 k_dataFromL2CacheToL1Requestor;
2006 jj_popL1RequestQueue;
2007 }
2008
2009 transition(L2_SO, L1_UPGRADE, L2_OMV) {
2010 set_setMRU;
2011 nn_addSharer;
2012 i_allocateTBE;
2013 xx_recordGetXL1ID;
2014 ccc_setPendingIntAcksMinusOne;
2015 vv_issueInvalidateOtherIntL1copiesRequest; // for internal
2016 b_issueGETX; // for external
2017 uu_profileMiss;
2018 jj_popL1RequestQueue;
2019 }
2020
2021 transition(L2_SO, L1_UPGRADE_no_others, L2_OM) {
2022 set_setMRU;
2023 i_allocateTBE;
2024 xx_recordGetXL1ID;
2025 b_issueGETX; // for external
2026 uu_profileMiss;
2027 jj_popL1RequestQueue;
2028 }
2029
2030 transition(L2_SO, L1_GETX, L2_OMV) {
2031 set_setMRU;
2032 i_allocateTBE;
2033 xx_recordGetXL1ID;
2034 bbb_setPendingIntAcksToSharers;
2035 vv_issueInvalidateOtherIntL1copiesRequest;
2036 nn_addSharer;
2037 b_issueGETX; // for external
2038 uu_profileMiss;
2039 jj_popL1RequestQueue;
2040 }
2041
2042 transition(L2_SO, {L1_PUTS, L1_PUTX}) { // PUTX possible because L2 downgraded before seeing PUTX
2043 ww_profileMissNoDir;
2044 w_sendPutAckToL1Cache;
2045 kk_removeRequestSharer;
2046 jj_popL1RequestQueue;
2047 }
2048
2049 transition(L2_SO, {L1_PUTS_last, L1_PUTX_last}, L2_O) { // PUTX possible because L2 downgraded before seeing PUTX
2050 ww_profileMissNoDir;
2051 w_sendPutAckToL1Cache;
2052 kk_removeRequestSharer;
2053 jj_popL1RequestQueue;
2054 }
2055
2056 // Transitions from L2_OIV
2057 // L2 replacement put us here, we must stall all L1 requests
2058 transition(L2_OIV, {Forwarded_GETS, Forwarded_GET_INSTR}) {
2059 y_dataFromTBEToRequestor;
2060 l_popForwardedRequestQueue;
2061 }
2062
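  // A forwarded GETX cannot be serviced yet: local L1 sharers may still hold
  // copies, so ownership is not transferred until all internal acks arrive.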
2063 transition(L2_OIV, Forwarded_GETX) {
2064 z_stall;
2065 }
2066
2067 transition(L2_OIV, Proc_int_ack) {
2068 aa_removeResponseSharer;
2069     r_decrementNumberOfPendingIntAcks;
2070 o_popIncomingResponseQueue;
2071 }
2072
2073 transition(L2_OIV, Proc_last_int_ack, L2_OIN) {
2074 aa_removeResponseSharer;
2075     r_decrementNumberOfPendingIntAcks;
2076 o_popIncomingResponseQueue;
2077 d_issuePUTX;
2078 rr_deallocateL2CacheBlock;
2079 }
2080
2081 transition(L2_OIV, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
2082 z_stall;
2083 }
2084
2085 // transitions from L2_OIN
2086 // L2 replacement put us here, we must stall all L1 requests
2087 transition(L2_OIN, {Forwarded_GETS, Forwarded_GET_INSTR, Forwarded_GETX}) {
2088 y_dataFromTBEToRequestor;
2089 l_popForwardedRequestQueue;
2090 }
2091
2092 transition(L2_OIN, Dir_WB_ack, L2_I) {
2093 s_deallocateTBE;
2094 l_popForwardedRequestQueue;
2095 }
2096
2097 transition(L2_OIN, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
2098 z_stall;
2099 }
2100
2101 // transitions from L2_OIC
2102 // directory put us in this state, should not see any forwards
2103 // we must stall all L1 requests
2104 transition(L2_OIC, Proc_int_ack) {
2105 aa_removeResponseSharer;
2106     r_decrementNumberOfPendingIntAcks;
2107 o_popIncomingResponseQueue;
2108 }
2109
2110 transition(L2_OIC, Proc_last_int_ack, L2_I) {
2111 aa_removeResponseSharer;
2112     r_decrementNumberOfPendingIntAcks;
2113 gg_dataFromL2CacheToGetXForwardID;
2114 s_deallocateTBE;
2115 o_popIncomingResponseQueue;
2116 }
2117
2118 transition(L2_OIC, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETX, L1_GETS, L1_GET_INSTR}) { // stall L1 requests
2119 z_stall;
2120 }
2121
2122 // Transitions from L2_OMV,
2123 // int_acks needed
2124 // waiting to see our Forwarded GETX from the directory
2125   // if we see the Forwarded GETX before all invalidate acks are received, stall
2126 // stall all L1 requests
2127 transition(L2_OMV, Proc_int_ack) {
2128 aa_removeResponseSharer;
2129 r_decrementNumberOfPendingIntAcks;
2130 o_popIncomingResponseQueue;
2131 }
2132
2133 transition(L2_OMV, Proc_last_int_ack, L2_OM) {
2134 aa_removeResponseSharer;
2135 r_decrementNumberOfPendingIntAcks;
2136 o_popIncomingResponseQueue;
2137 }
2138
2139 transition(L2_OMV, Proc_ext_ack) {
2140 q_decrementNumberOfPendingExtAcks;
2141 o_popIncomingResponseQueue;
2142 }
2143
2144 transition(L2_OMV, {Forwarded_GETS, Forwarded_GET_INSTR}) { // these are GetS that beat us to the directory
2145 e_dataFromL2CacheToL2Requestor;
2146 l_popForwardedRequestQueue;
2147 }
2148
2149 transition(L2_OMV, Dir_exe_ack, L2_MV) {
2150 l_popForwardedRequestQueue;
2151 }
2152
2153 transition(L2_OMV, Forwarded_GETX) { // the Forwarded GetX may or may not be ours, we can't respond until int_acks received
2154 z_stall;
2155 }
2156
2157 transition(L2_OMV, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETS, L1_GET_INSTR, L1_GETX}) { // must stall all L1 requests
2158 z_stall;
2159 }
2160
2161 // Transitions from L2_OM,
2162 // all L1 copies invalid, no int_acks needed
2163 // waiting to see our Forwarded GETX from the directory
2164 // once we see the Forwarded GETX, we can move to IM and wait for the data_ack
2165 // stall all L1 requests
2166 transition(L2_OM, Proc_ext_ack) {
2167 q_decrementNumberOfPendingExtAcks;
2168 o_popIncomingResponseQueue;
2169 }
2170
2171 transition(L2_OM, {Forwarded_GETS, Forwarded_GET_INSTR}) { // these are GetS that beat us to the directory
2172 e_dataFromL2CacheToL2Requestor;
2173 l_popForwardedRequestQueue;
2174 }
2175
2176 transition(L2_OM, Forwarded_GETX, L2_IM) { // the Forwarded GetX may or may not be ours
2177     e_dataFromL2CacheToL2Requestor; // we're probably sending a message to ourselves here, but not guaranteed
2178 l_popForwardedRequestQueue;
2179 }
2180
2181 transition(L2_OM, Dir_exe_ack, L2_MT) { // Directory tells us we already have an exclusive copy
2182 hh_issueStoreHit;
2183 s_deallocateTBE;
2184 l_popForwardedRequestQueue;
2185 }
2186
2187 transition(L2_OM, {L1_UPGRADE, L1_UPGRADE_no_others, L1_GETS, L1_GET_INSTR, L1_GETX}) { // must stall all L1 requests
2188 z_stall;
2189 }
2190
2191 }