/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id: MSI_MOSI_CMP_directory-L2cache.sm 1.12 05/01/19 15:55:40-06:00 beckmann@s0-28.cs.wisc.edu $
 *
 */

machine(L2Cache, "MESI Directory L2 Cache CMP")
 : CacheMemory * L2cacheMemory,
   Cycles l2_request_latency = 2,
   Cycles l2_response_latency = 2,
   Cycles to_l1_latency = 1
{
  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network
  MessageBuffer DirRequestFromL2Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";  // this L2 bank -> Memory
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";  // this L2 bank -> a local L1
  MessageBuffer responseFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";  // this L2 bank -> a local L1 || Memory

  // FROM the network to this local bank of L2 cache
  MessageBuffer unblockToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="unblock";  // a local L1 || Memory -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";  // a local L1 -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";  // a local L1 || Memory -> this L2 bank

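  // Note: requests travel on virtual network 0, responses on virtual network 1,
  // and unblocks on virtual network 2; the "To"/"From" direction is relative to
  // this L2 bank.
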
  // STATES
  state_declaration(State, desc="L2 Cache states", default="L2Cache_State_NP") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not present in either cache";
    SS, AccessPermission:Read_Only, desc="L2 cache entry Shared, also present in one or more L1s";
    M, AccessPermission:Read_Write, desc="L2 cache entry Modified, not present in any L1s", format="!b";
    MT, AccessPermission:Maybe_Stale, desc="L2 cache entry Modified in a local L1, assume L2 copy stale", format="!b";

    // L2 replacement
    M_I, AccessPermission:Busy, desc="L2 cache replacing, have all acks, sent dirty data to memory, waiting for ACK from memory";
    MT_I, AccessPermission:Busy, desc="L2 cache replacing, getting data from exclusive";
    MCT_I, AccessPermission:Busy, desc="L2 cache replacing, clean in L2, getting data or ack from exclusive";
    I_I, AccessPermission:Busy, desc="L2 replacing clean data, need to inv sharers and then drop data";
    S_I, AccessPermission:Busy, desc="L2 replacing dirty data, collecting acks from L1s";

    // Transient states for fetching data from memory
    ISS, AccessPermission:Busy, desc="L2 idle, got single L1_GETS, issued memory fetch, have not seen response yet";
    IS, AccessPermission:Busy, desc="L2 idle, got L1_GET_INSTR or multiple L1_GETS, issued memory fetch, have not seen response yet";
    IM, AccessPermission:Busy, desc="L2 idle, got L1_GETX, issued memory fetch, have not seen response(s) yet";

    // Blocking states
    SS_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from SS";
    MT_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from MT";
    M_MB, AccessPermission:Busy, desc="Blocked for L1_GETX from M";

    MT_IIB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
    MT_IB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
    MT_SB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";

  }

  // EVENTS
  enumeration(Event, desc="L2 Cache events") {
    // L2 events

    // events initiated by the local L1s
    L1_GET_INSTR,         desc="an L1I GET_INSTR request for a block mapped to us";
    L1_GETS,              desc="an L1D GETS request for a block mapped to us";
    L1_GETX,              desc="an L1D GETX request for a block mapped to us";
    L1_UPGRADE,           desc="an L1D UPGRADE request for a block mapped to us";

    L1_PUTX,              desc="L1 replacing data";
    L1_PUTX_old,          desc="L1 replacing data, but no longer a sharer";

    Fwd_L1_GETX,          desc="L1 did not have data, so we supply";
    Fwd_L1_GETS,          desc="L1 did not have data, so we supply";
    Fwd_L1_GET_INSTR,     desc="L1 did not have data, so we supply";

    // events initiated by this L2
    L2_Replacement,       desc="L2 Replacement", format="!r";
    L2_Replacement_clean, desc="L2 Replacement, but data is clean", format="!r";

    // events from memory controller
    Mem_Data,             desc="data from memory", format="!r";
    Mem_Ack,              desc="ack from memory", format="!r";

    // M->S data writeback
    WB_Data,              desc="data from L1";
    WB_Data_clean,        desc="clean data from L1";
    Ack,                  desc="writeback ack";
    Ack_all,              desc="writeback ack";

    Unblock,              desc="Unblock from L1 requestor";
    Unblock_Cancel,       desc="Unblock from L1 requestor (FOR XACT MEMORY)";
    Exclusive_Unblock,    desc="Unblock from L1 requestor";

    MEM_Inv,              desc="Invalidation from directory";

  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,            desc="cache state";
    NetDest Sharers,             desc="tracks the on-chip L1 sharers";
    MachineID Exclusive,         desc="Exclusive holder of block";
    DataBlock DataBlk,           desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address,             desc="Physical address for this TBE";
    State TBEState,              desc="Transient state";
    DataBlock DataBlk,           desc="Buffer for the data block";
    bool Dirty, default="false", desc="Data is Dirty";

    NetDest L1_GetS_IDs,         desc="Set of the internal processors that want the block in shared state";
    MachineID L1_GetX_ID,        desc="ID of the L1 cache to forward the block to once we get a response";
    bool isPrefetch,             desc="Set if this was caused by a prefetch";

    int pendingAcks,             desc="number of pending acks for invalidates during writeback";
  }

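  // Note: pendingAcks is initialized to the current sharer count when a TBE is
  // allocated (i_allocateTBE), decremented as invalidation acks arrive
  // (q_updateAck), and the Ack_all event fires once the count reaches zero.
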
  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable L2_TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Address a);

  // inclusive cache, returns L2 entries only
  Entry getCacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L2cacheMemory[addr]);
  }

  std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
    return CoherenceRequestType_to_string(type);
  }

  bool isOneSharerLeft(Address addr, MachineID requestor, Entry cache_entry) {
    assert(is_valid(cache_entry));
    assert(cache_entry.Sharers.isElement(requestor));
    return (cache_entry.Sharers.count() == 1);
  }

  bool isSharer(Address addr, MachineID requestor, Entry cache_entry) {
    if (is_valid(cache_entry)) {
      return cache_entry.Sharers.isElement(requestor);
    } else {
      return false;
    }
  }

  void addSharer(Address addr, MachineID requestor, Entry cache_entry) {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "machineID: %s, requestor: %s, address: %s\n",
            machineID, requestor, addr);
    cache_entry.Sharers.add(requestor);
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:NP;
  }

  std::string getStateStr(TBE tbe, Entry cache_entry, Address addr) {
    return L2Cache_State_to_string(getState(tbe, cache_entry, addr));
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {

    // MUST CHANGE
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := L2_TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
      return L2Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(cache_entry.CacheState));
      return L2Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    TBE tbe := L2_TBEs[addr];
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    }

    return getCacheEntry(addr).DataBlk;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L2Cache_State_to_permission(state));
    }
  }

  Event L1Cache_request_type_to_event(CoherenceRequestType type, Address addr,
                                      MachineID requestor, Entry cache_entry) {
    if (type == CoherenceRequestType:GETS) {
      return Event:L1_GETS;
    } else if (type == CoherenceRequestType:GET_INSTR) {
      return Event:L1_GET_INSTR;
    } else if (type == CoherenceRequestType:GETX) {
      return Event:L1_GETX;
    } else if (type == CoherenceRequestType:UPGRADE) {
      if (is_valid(cache_entry) && cache_entry.Sharers.isElement(requestor)) {
        return Event:L1_UPGRADE;
      } else {
        return Event:L1_GETX;
      }
    } else if (type == CoherenceRequestType:PUTX) {
      if (isSharer(addr, requestor, cache_entry)) {
        return Event:L1_PUTX;
      } else {
        return Event:L1_PUTX_old;
      }
    } else {
      DPRINTF(RubySlicc, "address: %s, Request Type: %s\n", addr, type);
      error("Invalid L1 forwarded request type");
    }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

  bool isDirty(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.Dirty;
  }

  // ** OUT_PORTS **

  out_port(L1RequestIntraChipL2Network_out, RequestMsg, L1RequestFromL2Cache);
  out_port(DirRequestIntraChipL2Network_out, RequestMsg, DirRequestFromL2Cache);
  out_port(responseIntraChipL2Network_out, ResponseMsg, responseFromL2Cache);

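  // Note: the in_ports below are ranked so that higher-rank queues are
  // serviced first; unblocks (rank 2) and responses (rank 1) are consumed
  // before new L1 requests (rank 0).
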
  in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
    if (L1unblockNetwork_in.isReady()) {
      peek(L1unblockNetwork_in, ResponseMsg) {
        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := L2_TBEs[in_msg.Address];
        DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
                in_msg.Address, getState(tbe, cache_entry, in_msg.Address),
                in_msg.Sender, in_msg.Type, in_msg.Destination);

        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:EXCLUSIVE_UNBLOCK) {
          trigger(Event:Exclusive_Unblock, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
          trigger(Event:Unblock, in_msg.Address, cache_entry, tbe);
        } else {
          error("unknown unblock message");
        }
      }
    }
  }

  // Response IntraChip L2 Network - response msg to this particular L2 bank
  in_port(responseIntraChipL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
    if (responseIntraChipL2Network_in.isReady()) {
      peek(responseIntraChipL2Network_in, ResponseMsg) {
        // test whether it's from a local L1 or an off-chip source
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := L2_TBEs[in_msg.Address];

        if (machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
          if (in_msg.Type == CoherenceResponseType:DATA) {
            if (in_msg.Dirty) {
              trigger(Event:WB_Data, in_msg.Address, cache_entry, tbe);
            } else {
              trigger(Event:WB_Data_clean, in_msg.Address, cache_entry, tbe);
            }
          } else if (in_msg.Type == CoherenceResponseType:ACK) {
            if ((getPendingAcks(tbe) - in_msg.AckCount) == 0) {
              trigger(Event:Ack_all, in_msg.Address, cache_entry, tbe);
            } else {
              trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
            }
          } else {
            error("unknown message type");
          }

        } else { // external message
          if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
            // L2 now has data and all off-chip acks
            trigger(Event:Mem_Data, in_msg.Address, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceResponseType:MEMORY_ACK) {
            // memory has acknowledged the writeback
            trigger(Event:Mem_Ack, in_msg.Address, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            // invalidation request from the directory
            trigger(Event:MEM_Inv, in_msg.Address, cache_entry, tbe);
          } else {
            error("unknown message type");
          }
        }
      }
    }  // if not ready, do nothing
  }

  // L1 Request
  in_port(L1RequestIntraChipL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
    if (L1RequestIntraChipL2Network_in.isReady()) {
      peek(L1RequestIntraChipL2Network_in, RequestMsg) {
        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := L2_TBEs[in_msg.Address];

        DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
                in_msg.Address, getState(tbe, cache_entry, in_msg.Address),
                in_msg.Requestor, in_msg.Type, in_msg.Destination);

        assert(machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache);
        assert(in_msg.Destination.isElement(machineID));

        if (is_valid(cache_entry)) {
          // The L2 contains the block, so proceed with handling the request
          trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address,
                                                in_msg.Requestor, cache_entry),
                  in_msg.Address, cache_entry, tbe);
        } else {
          if (L2cacheMemory.cacheAvail(in_msg.Address)) {
            // L2 doesn't have the line, but we have space for it in the L2
            trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address,
                                                  in_msg.Requestor, cache_entry),
                    in_msg.Address, cache_entry, tbe);
          } else {
            // No room in the L2, so we need to make room before handling the request
            Entry L2cache_entry := getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address));
            if (isDirty(L2cache_entry)) {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address),
                      L2cache_entry, L2_TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
            } else {
              trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address),
                      L2cache_entry, L2_TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueFetchToMemory, "a", desc="fetch data from memory") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(DirRequestIntraChipL2Network_out, RequestMsg, latency=l2_request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Control;
      }
    }
  }

  action(b_forwardRequestToExclusive, "b", desc="Forward request to the exclusive L1") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination.add(cache_entry.Exclusive);
        out_msg.MessageSize := MessageSizeType:Request_Control;
      }
    }
  }

  action(c_exclusiveReplacement, "c", desc="Send data to memory") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:MEMORY_DATA;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(c_exclusiveCleanReplacement, "cc", desc="Send ack to memory for clean replacement") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  action(ct_exclusiveReplacementFromTBE, "ct", desc="Send data to memory") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:MEMORY_DATA;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

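  // Note: in the data-response actions below, AckCount is set to the negative
  // of the current sharer count (plus one if the requestor is itself a
  // sharer, since it will not ack its own invalidation). The requesting L1 is
  // expected to add incoming invalidation acks to this value and treat the
  // miss as complete when the sum reaches zero.
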
  action(d_sendDataToRequestor, "d", desc="Send data from cache to requestor") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;

        out_msg.AckCount := 0 - cache_entry.Sharers.count();
        if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
          out_msg.AckCount := out_msg.AckCount + 1;
        }
      }
    }
  }

  action(dd_sendExclusiveDataToRequestor, "dd", desc="Send exclusive data from cache to requestor") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;

        out_msg.AckCount := 0 - cache_entry.Sharers.count();
        if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
          out_msg.AckCount := out_msg.AckCount + 1;
        }
      }
    }
  }

  action(ds_sendSharedDataToRequestor, "ds", desc="Send shared data from cache to requestor") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=l2_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.AckCount := 0;
      }
    }
  }

  action(e_sendDataToGetSRequestors, "e", desc="Send data from cache to all GetS IDs") {
    assert(is_valid(tbe));
    assert(tbe.L1_GetS_IDs.count() > 0);
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.Destination := tbe.L1_GetS_IDs;  // internal nodes
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ex_sendExclusiveDataToGetSRequestors, "ex", desc="Send data from cache to all GetS IDs") {
    assert(is_valid(tbe));
    assert(tbe.L1_GetS_IDs.count() == 1);
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.Destination := tbe.L1_GetS_IDs;  // internal nodes
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ee_sendDataToGetXRequestor, "ee", desc="Send data from cache to GetX ID") {
    enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
      assert(is_valid(tbe));
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.Destination.add(tbe.L1_GetX_ID);
      DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      DPRINTF(RubySlicc, "Address: %s, Destination: %s, DataBlock: %s\n",
              out_msg.Address, out_msg.Destination, out_msg.DataBlk);
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(f_sendInvToSharers, "f", desc="invalidate sharers for L2 replacement") {
    enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:INV;
      out_msg.Requestor := machineID;
      out_msg.Destination := cache_entry.Sharers;
      out_msg.MessageSize := MessageSizeType:Request_Control;
    }
  }

  action(fw_sendFwdInvToSharers, "fw", desc="invalidate sharers for request") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := cache_entry.Sharers;
        out_msg.MessageSize := MessageSizeType:Request_Control;
      }
    }
  }

  action(fwm_sendFwdInvToSharersMinusRequestor, "fwm", desc="invalidate sharers for request, requestor is sharer") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(L1RequestIntraChipL2Network_out, RequestMsg, latency=to_l1_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:INV;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.Destination := cache_entry.Sharers;
        out_msg.Destination.remove(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Request_Control;
      }
    }
  }

  // OTHER ACTIONS
  action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
    check_allocate(L2_TBEs);
    assert(is_valid(cache_entry));
    L2_TBEs.allocate(address);
    set_tbe(L2_TBEs[address]);
    tbe.L1_GetS_IDs.clear();
    tbe.DataBlk := cache_entry.DataBlk;
    tbe.Dirty := cache_entry.Dirty;
    tbe.pendingAcks := cache_entry.Sharers.count();
  }

  action(s_deallocateTBE, "s", desc="Deallocate external TBE") {
    L2_TBEs.deallocate(address);
    unset_tbe();
  }

  action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
    profileMsgDelay(0, L1RequestIntraChipL2Network_in.dequeue_getDelayCycles());
  }

  action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
    profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
  }

  action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
    profileMsgDelay(1, responseIntraChipL2Network_in.dequeue_getDelayCycles());
  }

  action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(mr_writeDataToCacheFromRequest, "mr", desc="Write data from request queue to cache") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(q_updateAck, "q", desc="update pending ack count") {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
    }
  }

  action(qq_writeDataToTBE, "\qq", desc="Write data from response queue to TBE") {
    peek(responseIntraChipL2Network_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(z_stall, "z", desc="Stall") {
  }

  action(ss_recordGetSL1ID, "\s", desc="Record L1 GetS for load response") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.L1_GetS_IDs.add(in_msg.Requestor);
    }
  }

  action(xx_recordGetXL1ID, "\x", desc="Record L1 GetX for store response") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.L1_GetX_ID := in_msg.Requestor;
    }
  }

  action(set_setMRU, "\set", desc="set the MRU entry") {
    L2cacheMemory.setMRU(address);
  }

  action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L2cacheMemory.allocate(address, new Entry));
    }
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(t_sendWBAck, "t", desc="Send writeback ACK") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:WB_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(ts_sendInvAckToUpgrader, "ts", desc="Send ACK to upgrader") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      enqueue(responseIntraChipL2Network_out, ResponseMsg, latency=to_l1_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        // upgrader doesn't get ack from itself, hence the + 1
        out_msg.AckCount := 0 - cache_entry.Sharers.count() + 1;
      }
    }
  }

  GenericRequestType convertToGenericType(CoherenceRequestType type) {
    if (type == CoherenceRequestType:GETS) {
      return GenericRequestType:GETS;
    } else if (type == CoherenceRequestType:GETX) {
      return GenericRequestType:GETX;
    } else if (type == CoherenceRequestType:GET_INSTR) {
      return GenericRequestType:GET_INSTR;
    } else if (type == CoherenceRequestType:UPGRADE) {
      return GenericRequestType:UPGRADE;
    } else {
      DPRINTF(RubySlicc, "%s\n", type);
      error("Invalid CoherenceRequestType\n");
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
                                          in_msg.AccessMode, in_msg.Prefetch);
    }
  }

  action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      // profile_request(in_msg.L1CacheStateStr, getStateStr(address), "NA", getCoherenceRequestTypeStr(in_msg.Type));
    }
  }

  action(nn_addSharer, "\n", desc="Add L1 sharer to list") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(cache_entry));
      addSharer(address, in_msg.Requestor, cache_entry);
      APPEND_TRANSITION_COMMENT( cache_entry.Sharers );
    }
  }

  action(nnu_addSharerFromUnblock, "\nu", desc="Add L1 sharer to list") {
    peek(L1unblockNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      addSharer(address, in_msg.Sender, cache_entry);
    }
  }

  action(kk_removeRequestSharer, "\k", desc="Remove L1 Request sharer from list") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(cache_entry));
      cache_entry.Sharers.remove(in_msg.Requestor);
    }
  }

  action(ll_clearSharers, "\l", desc="Remove all L1 sharers from list") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(cache_entry));
      cache_entry.Sharers.clear();
    }
  }

  action(mm_markExclusive, "\m", desc="set the exclusive owner") {
    peek(L1RequestIntraChipL2Network_in, RequestMsg) {
      assert(is_valid(cache_entry));
      cache_entry.Sharers.clear();
      cache_entry.Exclusive := in_msg.Requestor;
      addSharer(address, in_msg.Requestor, cache_entry);
    }
  }

  action(mmu_markExclusiveFromUnblock, "\mu", desc="set the exclusive owner") {
    peek(L1unblockNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.Sharers.clear();
      cache_entry.Exclusive := in_msg.Sender;
      addSharer(address, in_msg.Sender, cache_entry);
    }
  }

  action(zz_stallAndWaitL1RequestQueue, "zz", desc="recycle L1 request queue") {
    stall_and_wait(L1RequestIntraChipL2Network_in, address);
  }

  action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
    responseIntraChipL2Network_in.recycle();
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

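  // Note: zz_stallAndWaitL1RequestQueue parks the blocked request on a
  // per-address wait list via stall_and_wait(); kd_wakeUpDependents
  // (wakeUpBuffers) later returns any stalled messages for that address to
  // their queues once the blocking transition completes.
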
  //*****************************************************
  // TRANSITIONS
  //*****************************************************


  //===============================================
  // BASE STATE - I

  // Transitions from I (Idle)
  transition({NP, IS, ISS, IM, SS, M, M_I, I_I, S_I, M_MB, MT_IB, MT_SB}, L1_PUTX) {
    t_sendWBAck;
    jj_popL1RequestQueue;
  }

  transition({NP, SS, M, MT, M_I, I_I, S_I, IS, ISS, IM, M_MB, MT_IB, MT_SB}, L1_PUTX_old) {
    t_sendWBAck;
    jj_popL1RequestQueue;
  }

  transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L2_Replacement, L2_Replacement_clean}) {
    zz_stallAndWaitL1RequestQueue;
  }

  transition({IM, IS, ISS, SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
    zn_recycleResponseNetwork;
  }

  transition({S_I, M_I, MT_I}, MEM_Inv) {
    o_popIncomingResponseQueue;
  }


  transition({SS_MB, M_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, {L1_GETS, L1_GET_INSTR, L1_GETX, L1_UPGRADE}) {
    zz_stallAndWaitL1RequestQueue;
  }


  transition(NP, L1_GETS, ISS) {
    qq_allocateL2CacheBlock;
    ll_clearSharers;
    nn_addSharer;
    i_allocateTBE;
    ss_recordGetSL1ID;
    a_issueFetchToMemory;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition(NP, L1_GET_INSTR, IS) {
    qq_allocateL2CacheBlock;
    ll_clearSharers;
    nn_addSharer;
    i_allocateTBE;
    ss_recordGetSL1ID;
    a_issueFetchToMemory;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition(NP, L1_GETX, IM) {
    qq_allocateL2CacheBlock;
    ll_clearSharers;
    // nn_addSharer;
    i_allocateTBE;
    xx_recordGetXL1ID;
    a_issueFetchToMemory;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }


  // transitions from IS/IM
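  // Note: a lone L1_GETS that misses is tracked in ISS and answered with
  // DATA_EXCLUSIVE once memory responds, so a single reader receives the block
  // exclusively; instruction fetches or multiple overlapping GETS (IS) receive
  // a shared DATA copy instead.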

  transition(ISS, Mem_Data, MT_MB) {
    m_writeDataToCache;
    ex_sendExclusiveDataToGetSRequestors;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition(IS, Mem_Data, SS) {
    m_writeDataToCache;
    e_sendDataToGetSRequestors;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM, Mem_Data, MT_MB) {
    m_writeDataToCache;
    ee_sendDataToGetXRequestor;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
  }

  transition({IS, ISS}, {L1_GETS, L1_GET_INSTR}, IS) {
    nn_addSharer;
    ss_recordGetSL1ID;
    uu_profileMiss;
    jj_popL1RequestQueue;
  }

  transition({IS, ISS}, L1_GETX) {
    zz_stallAndWaitL1RequestQueue;
  }

  transition(IM, {L1_GETX, L1_GETS, L1_GET_INSTR}) {
    zz_stallAndWaitL1RequestQueue;
  }

  // transitions from SS
  transition(SS, {L1_GETS, L1_GET_INSTR}) {
    ds_sendSharedDataToRequestor;
    nn_addSharer;
    set_setMRU;
    jj_popL1RequestQueue;
  }


  transition(SS, L1_GETX, SS_MB) {
    d_sendDataToRequestor;
    // fw_sendFwdInvToSharers;
    fwm_sendFwdInvToSharersMinusRequestor;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(SS, L1_UPGRADE, SS_MB) {
    fwm_sendFwdInvToSharersMinusRequestor;
    ts_sendInvAckToUpgrader;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(SS, L2_Replacement_clean, I_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(SS, {L2_Replacement, MEM_Inv}, S_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }


  transition(M, L1_GETX, MT_MB) {
    d_sendDataToRequestor;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(M, L1_GET_INSTR, SS) {
    d_sendDataToRequestor;
    nn_addSharer;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(M, L1_GETS, MT_MB) {
    dd_sendExclusiveDataToRequestor;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(M, {L2_Replacement, MEM_Inv}, M_I) {
    i_allocateTBE;
    c_exclusiveReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(M, L2_Replacement_clean, M_I) {
    i_allocateTBE;
    c_exclusiveCleanReplacement;
    rr_deallocateL2CacheBlock;
  }


  // transitions from MT

  transition(MT, L1_GETX, MT_MB) {
    b_forwardRequestToExclusive;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }


  transition(MT, {L1_GETS, L1_GET_INSTR}, MT_IIB) {
    b_forwardRequestToExclusive;
    uu_profileMiss;
    set_setMRU;
    jj_popL1RequestQueue;
  }

  transition(MT, {L2_Replacement, MEM_Inv}, MT_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(MT, L2_Replacement_clean, MCT_I) {
    i_allocateTBE;
    f_sendInvToSharers;
    rr_deallocateL2CacheBlock;
  }

  transition(MT, L1_PUTX, M) {
    ll_clearSharers;
    mr_writeDataToCacheFromRequest;
    t_sendWBAck;
    jj_popL1RequestQueue;
  }


  // transitions from blocking states
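  // Note: in the *_MB / *_IIB / *_IB / *_SB states the line is held blocked on
  // behalf of one L1 request; the requesting L1 sends (Exclusive_)Unblock once
  // it has installed the block, and for MT downgrades the old owner's writeback
  // data must also arrive before the line returns to a stable state.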
  transition(SS_MB, Unblock_Cancel, SS) {
    k_popUnblockQueue;
    kd_wakeUpDependents;
  }

  transition(MT_MB, Unblock_Cancel, MT) {
    k_popUnblockQueue;
    kd_wakeUpDependents;
  }

  transition(MT_IB, Unblock_Cancel, MT) {
    k_popUnblockQueue;
    kd_wakeUpDependents;
  }

  transition(SS_MB, Exclusive_Unblock, MT) {
    // update actual directory
    mmu_markExclusiveFromUnblock;
    k_popUnblockQueue;
    kd_wakeUpDependents;
  }

  transition({M_MB, MT_MB}, Exclusive_Unblock, MT) {
    // update actual directory
    mmu_markExclusiveFromUnblock;
    k_popUnblockQueue;
    kd_wakeUpDependents;
  }

  transition(MT_IIB, {L1_PUTX, L1_PUTX_old}) {
    zz_stallAndWaitL1RequestQueue;
  }

  transition(MT_IIB, Unblock, MT_IB) {
    nnu_addSharerFromUnblock;
    k_popUnblockQueue;
  }

  transition(MT_IIB, {WB_Data, WB_Data_clean}, MT_SB) {
    m_writeDataToCache;
    o_popIncomingResponseQueue;
  }

  transition(MT_IB, {WB_Data, WB_Data_clean}, SS) {
    m_writeDataToCache;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(MT_SB, Unblock, SS) {
    nnu_addSharerFromUnblock;
    k_popUnblockQueue;
    kd_wakeUpDependents;
  }

  // writeback states
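  // Note: on replacement the L2 first invalidates or recalls the block from
  // the L1s (I_I / S_I collect invalidation acks, MT_I / MCT_I wait on the
  // exclusive owner), then in M_I writes dirty data back to the directory and
  // waits for Mem_Ack before the block is finally dropped.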
  transition({I_I, S_I, MT_I, MCT_I, M_I}, {L1_GETX, L1_UPGRADE, L1_GETS, L1_GET_INSTR}) {
    zz_stallAndWaitL1RequestQueue;
  }

  transition(I_I, Ack) {
    q_updateAck;
    o_popIncomingResponseQueue;
  }

  transition(I_I, Ack_all, M_I) {
    c_exclusiveCleanReplacement;
    o_popIncomingResponseQueue;
  }

  transition({MT_I, MCT_I}, WB_Data, M_I) {
    qq_writeDataToTBE;
    ct_exclusiveReplacementFromTBE;
    o_popIncomingResponseQueue;
  }

  transition(MCT_I, {WB_Data_clean, Ack_all}, M_I) {
    c_exclusiveCleanReplacement;
    o_popIncomingResponseQueue;
  }

  transition(MCT_I, {L1_PUTX, L1_PUTX_old}) {
    zz_stallAndWaitL1RequestQueue;
  }

  // L1 never changed the dirty data, so write back the TBE copy
  transition(MT_I, Ack_all, M_I) {
    ct_exclusiveReplacementFromTBE;
    o_popIncomingResponseQueue;
  }

  transition(MT_I, {L1_PUTX, L1_PUTX_old}) {
    zz_stallAndWaitL1RequestQueue;
  }

  // possible race between unblock and immediate replacement
  transition({MT_MB, SS_MB}, {L1_PUTX, L1_PUTX_old}) {
    zz_stallAndWaitL1RequestQueue;
  }

  transition(MT_I, WB_Data_clean, NP) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(S_I, Ack) {
    q_updateAck;
    o_popIncomingResponseQueue;
  }

  transition(S_I, Ack_all, M_I) {
    ct_exclusiveReplacementFromTBE;
    o_popIncomingResponseQueue;
  }

  transition(M_I, Mem_Ack, NP) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}