ruby: Added merge GETS optimization to hammer
[gem5.git] / src / mem / protocol / MOESI_hammer-cache.sm
1 /*
2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
31 *
32 * Authors: Milo Martin
33 * Brad Beckmann
34 */
35
// Cache controller for the AMD Hammer-like (MOESI_hammer) protocol: a private
// L1I/L1D pair backed by a private L2 that is exclusive with respect to the L1s.
machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,           // processor-side interface; receives load/store callbacks
   CacheMemory * L1IcacheMemory,    // L1 instruction cache
   CacheMemory * L1DcacheMemory,    // L1 data cache
   CacheMemory * L2cacheMemory,     // private L2, exclusive of the L1s
   int cache_response_latency = 10, // cycles to source a response from this cache
   int issue_latency = 2,           // cycles to issue a request onto the network
   int l2_cache_hit_latency = 10,   // cycles for an L2-to-L1 block transfer
   bool no_mig_atomic = true        // if true, don't migrate atomically-accessed blocks on Other_GETS
{
46
  // NETWORK BUFFERS
  // Outbound: requests on vnet 2, responses on vnet 4, unblocks on vnet 5.
  // Inbound: forwarded requests on vnet 3, responses on vnet 4.
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";
54
55
56 // STATES
  // Cache states: MOESI base states plus transient states for in-flight
  // requests, writebacks, and intra-hierarchy (L2-to-L1) transfers.
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, desc="Idle";
    S, desc="Shared";
    O, desc="Owned";
    M, desc="Modified (dirty)";
    MM, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    ISM, "ISM", desc="Issued GetX, received data, waiting for all acks";
    M_W, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, "MM^W", desc="Issued GetX, received exclusive data";
    IS, "IS", desc="Issued GetS";
    SS, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, "OI", desc="Issued PutO, waiting for ack";
    MI, "MI", desc="Issued PutX, waiting for ack";
    II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    // *T states: block is being moved from the L2 into one of the L1s
    IT, "IT", desc="Invalid block transferring to L1";
    ST, "ST", desc="S block transferring to L1";
    OT, "OT", desc="O block transferring to L1";
    MT, "MT", desc="M block transferring to L1";
    MMT, "MMT", desc="MM block transferring to L1";
  }
83
84 // EVENTS
  // Events that drive the state machine: processor requests, cache-hierarchy
  // transfers, forwarded requests from other caches, responses, and internal
  // triggers generated when all expected acks/data have arrived.
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    // Raised instead of Other_GETS when the block was atomically accessed
    // and no_mig_atomic is set, to suppress ownership migration.
    Other_GETS_No_Mig, desc="A GetS from another processor";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  }
116
117 // TYPES
118
119 // STRUCTURE DEFINITIONS
120
  // Processor-side queue: the Sequencer deposits load/store/ifetch requests here.
  MessageBuffer mandatoryQueue, ordered="false";
122
123 // CacheEntry
124 structure(Entry, desc="...", interface="AbstractCacheEntry") {
125 State CacheState, desc="cache state";
126 bool Dirty, desc="Is the data dirty (different than memory)?";
127 DataBlock DataBlk, desc="data for the block";
128 bool FromL2, default="false", desc="block just moved from L2";
129 bool AtomicAccessed, default="false", desc="block just moved from L2";
130 }
131
  // TBE fields: per-address transaction bookkeeping while a request or
  // writeback is outstanding (ack counting, latency timestamps, owner info).
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial requests was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }
145
  // Interface to the C++-implemented TBE table keyed by block address.
  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
152
  // Table of outstanding transactions, one TBE per in-flight block address.
  TBETable TBEs, template_hack="<L1Cache_TBE>";
154
  // Return the cache entry for addr.  Exclusion guarantees at most one of the
  // three caches holds the tag; probe L2 first, then L1D, and otherwise fall
  // through to L1I (caller must already know the tag is present somewhere).
  Entry getCacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L2cacheMemory[addr]);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L1DcacheMemory[addr]);
    } else {
      return static_cast(Entry, L1IcacheMemory[addr]);
    }
  }
164
  // Set the access permission on whichever cache currently holds addr
  // (same L2 -> L1D -> L1I probe order as getCacheEntry).
  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else {
      return L1IcacheMemory.changePermission(addr, permission);
    }
  }
174
  // True if any of the three caches (L2, L1D, L1I) holds the tag for addr.
  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }
178
  // Current protocol state for addr: an in-flight TBE takes precedence over
  // the cached state; a block in no cache and no TBE is Idle.  The asserts
  // enforce the L1I/L1D/L2 exclusion invariant.
  State getState(Address addr) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if(TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (isCacheTagPresent(addr)) {
      return getCacheEntry(addr).CacheState;
    }
    return State:I;
  }
191
  // Record the new state in the TBE (if one exists) and the cache entry (if
  // cached), then derive the access permission from the new state:
  // MM/MM_W are writable, stable/transient read states are read-only,
  // everything else is inaccessible.
  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (isCacheTagPresent(addr)) {
      getCacheEntry(addr).CacheState := state;

      // Set permission
      if ((state == State:MM) ||
          (state == State:MM_W)) {
        changePermission(addr, AccessPermission:Read_Write);
      } else if (state == State:S ||
                 state == State:O ||
                 state == State:M ||
                 state == State:M_W ||
                 state == State:SM ||
                 state == State:ISM ||
                 state == State:OM ||
                 state == State:SS) {
        changePermission(addr, AccessPermission:Read_Only);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }
222
  // Map a processor request type to its protocol event.  Atomics are treated
  // as stores here; other distinctions for atomics are handled elsewhere
  // (e.g. the AtomicAccessed flag).
  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }
234
  // Classify which machine type satisfied a miss, for latency profiling.
  // A response from a peer L1 is reported as L1Cache_wCC (cache-to-cache).
  // NOTE: the addr parameter is currently unused.
  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }
245
  // For hit profiling: if the block was just promoted from the L2 (FromL2
  // set), report the hit as an L2 hit and clear the flag; otherwise it is a
  // plain L1 hit.
  GenericMachineType testAndClearLocalHit(Address addr) {
    if (getCacheEntry(addr).FromL2) {
      getCacheEntry(addr).FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }
254
  // Internal queue for self-scheduled events (ack-completion, L2-to-L1 done).
  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
263
264 // ** IN_PORTS **
265
266 // Trigger Queue
  // Trigger Queue: internally generated events — L2-to-L1 transfer completion
  // and the two "all acks received" notifications from o_checkForCompletion.
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
282
283 // Nothing from the request network
284
285 // Forward Network
  // Forward Network: requests forwarded by the directory (GETX, GETS, merged
  // GETS, invalidates, writeback ack/nack).  A plain GETS for a cached block
  // that was atomically accessed triggers Other_GETS_No_Mig when no_mig_atomic
  // is set, so ownership does not migrate away mid atomic sequence.
  in_port(forwardToCache_in, RequestMsg, forwardToCache) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (isCacheTagPresent(in_msg.Address)) {
            if (getCacheEntry(in_msg.Address).AtomicAccessed && no_mig_atomic) {
              trigger(Event:Other_GETS_No_Mig, in_msg.Address);
            } else {
              trigger(Event:Other_GETS, in_msg.Address);
            }
          } else {
            trigger(Event:Other_GETS, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
315
316 // Response Network
  // Response Network: acks and data from peers/memory, distinguished by
  // whether the responder kept a shared copy or handed over exclusivity.
  in_port(responseToCache_in, ResponseMsg, responseToCache) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
336
337 // Nothing from the unblock network
338
339 // Mandatory Queue
  // Mandatory Queue: processor requests.  Enforces L1I/L1D/L2 exclusion:
  // a block found in the "wrong" L1 is first pushed down to the L2 (evicting
  // an L2 victim if needed); a block in the L2 is pulled up into the proper
  // L1; otherwise the request allocates in the L1, making room by demoting
  // an L1 victim to the L2 (again possibly forcing an L2 replacement).
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
              trigger(Event:L1_to_L2, in_msg.LineAddress);
            } else {
              // No L2 room: replace the L2 victim first (note: triggers on
              // the victim's address, not the requested address)
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
            }
          }

          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 does't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)));
              }
            }
          }
        } else {
          // *** DATA ACCESS ***
          // Mirror image of the instruction path, with L1I and L1D swapped.

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
              trigger(Event:L1_to_L2, in_msg.LineAddress);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
            }
          }

          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 does't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)));
              }
            }
          }
        }
      }
    }
  }
424
425 // ACTIONS
426
  // Broadcast-style GETS to the directory; expect one response from each
  // other cache plus the memory, hence machineCount(L1Cache) pending messages.
  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }
438
  // Broadcast-style GETX to the directory; same ack-count accounting as GETS.
  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }
450
  // Hand exclusive data to the requestor of the forwarded request.  On a
  // directed probe this cache is the sole responder, so the message carries
  // acks for all caches; otherwise it counts for 2 acks.
  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        // Propagate timestamps so the requestor can profile miss latency.
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
471
  // Start a writeback: send PUT to the directory and await WB_ACK/WB_NACK.
  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }
481
  // Send (non-exclusive) data to the requestor; ack accounting as in
  // c_sendExclusiveData (all acks on a directed probe, 2 otherwise).
  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
502
  // Like e_sendData, but DATA_SHARED: this cache keeps a shared copy so the
  // requestor must not assume exclusivity.
  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        DEBUG_EXPR(out_msg.DataBlk);
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
524
  // Respond to a MERGED_GETS: one DATA_SHARED message multicast to every
  // requestor the directory merged; carries acks for all caches.
  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        DEBUG_EXPR(out_msg.DataBlk);
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
542
  // Ack a forwarded request without data (we hold no copy to supply).
  // Directed probes must be answered with data, hence the assert.
  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
558
  // Like f_sendAck, but ACK_SHARED: tells the requestor this cache retains a
  // shared copy of the block.
  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
574
  // Tell the directory the transaction is complete so it can unblock the line.
  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }
584
  // Unblock variant indicating we ended in an exclusive/owned state (M/O/E).
  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }
594
  // Unblock variant indicating we ended in Shared; includes the current owner
  // (recorded by uo_updateCurrentOwner) so the directory can track ownership.
  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := TBEs[address].CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }
605
  // Complete a local load hit: notify the sequencer, attributing the hit to
  // L1 or L2 via testAndClearLocalHit.
  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);

    sequencer.readCallback(address,
                           testAndClearLocalHit(address),
                           getCacheEntry(address).DataBlk);

  }
614
615 action(hx_external_load_hit, "hx", desc="load required external msgs") {
616 DEBUG_EXPR(getCacheEntry(address).DataBlk);
617 peek(responseToCache_in, ResponseMsg) {
618
619 sequencer.readCallback(address,
620 getNondirectHitMachType(in_msg.Address, in_msg.Sender),
621 getCacheEntry(address).DataBlk,
622 TBEs[address].InitialRequestTime,
623 TBEs[address].ForwardRequestTime,
624 TBEs[address].FirstResponseTime);
625
626 }
627 }
628
  // Complete a local store hit: notify the sequencer, mark the block dirty,
  // and remember atomic accesses (drives the no_mig_atomic GETS handling).
  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address,
                              testAndClearLocalHit(address),
                              getCacheEntry(address).DataBlk);

      getCacheEntry(address).Dirty := true;
      if (in_msg.Type == CacheRequestType:ATOMIC) {
        getCacheEntry(address).AtomicAccessed := true;
      }
    }
  }
642
  // Complete a store that required external messages (response in hand);
  // report latency timestamps to the sequencer and mark the block dirty.
  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              getCacheEntry(address).DataBlk,
                              TBEs[address].InitialRequestTime,
                              TBEs[address].ForwardRequestTime,
                              TBEs[address].FirstResponseTime);

    }
    getCacheEntry(address).Dirty := true;
  }
657
  // Complete an externally-satisfied store from a trigger event (no response
  // message to peek); attributes the hit to the last recorded responder.
  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address,
                                                    TBEs[address].LastResponder),
                            getCacheEntry(address).DataBlk,
                            TBEs[address].InitialRequestTime,
                            TBEs[address].ForwardRequestTime,
                            TBEs[address].FirstResponseTime);

    getCacheEntry(address).Dirty := true;
  }
671
  // Allocate a TBE for this address, snapshotting the cache entry's data and
  // dirty bit so a concurrent writeback can proceed after cache deallocation.
  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
    TBEs[address].Dirty := getCacheEntry(address).Dirty;
    TBEs[address].Sharers := false;
  }
679
  // Consume the head of the internal trigger queue.
  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }
683
  // Consume the head of the processor-side mandatory queue.
  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }
687
688 action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
689 forwardToCache_in.dequeue();
690 }
691
  // Account for an arriving response: subtract its ack count, remember the
  // responder, and reconcile the latency timestamps (a timestamp already set
  // locally must agree with the one carried in the message; otherwise adopt
  // the message's value).  Records the first-response time once.
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      DEBUG_EXPR(TBEs[address].NumPendingMsgs);
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
      DEBUG_EXPR(TBEs[address].NumPendingMsgs);
      TBEs[address].LastResponder := in_msg.Sender;
      if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        TBEs[address].InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (TBEs[address].ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(TBEs[address].ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        TBEs[address].ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (TBEs[address].FirstResponseTime == zero_time()) {
        TBEs[address].FirstResponseTime := get_time();
      }
    }
  }
  // Record the response's sender as the block's current owner; reported to
  // the directory later in gs_sendUnblockS.
  action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      TBEs[address].CurOwner := in_msg.Sender;
    }
  }
721
  // Consume the head of the response queue.
  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }
725
  // Schedule an L2-to-L1 transfer completion after the L2 hit latency; the
  // trigger queue will raise Complete_L2_to_L1 for this address.
  action(ll_L2toL1Transfer, "ll", desc="") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }
732
  // If all expected acks/data have arrived, raise the appropriate completion
  // trigger — ALL_ACKS if any responder reported a shared copy, otherwise
  // ALL_ACKS_NO_SHARERS.
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (TBEs[address].Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }
745
  // Reduce the expected-message count by one (no response message involved).
  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
  }
749
  // Raise the expected-message count by one.
  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
  }
753
  // Source data from the TBE (block already evicted from the cache arrays) to
  // the forwarded request's requestor; same ack accounting as e_sendData.
  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DEBUG_EXPR(out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
775
  // Source data from the TBE to all merged requestors of a MERGED_GETS;
  // carries acks for all caches.
  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DEBUG_EXPR(out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
793
  // Write back the TBE's copy to memory: WB_DIRTY with data if dirty,
  // otherwise WB_CLEAN (data included only for memory-side checking).
  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }
813
  // Note that at least one other cache holds a shared copy; selects ALL_ACKS
  // over ALL_ACKS_NO_SHARERS at completion time.
  action(r_setSharerBit, "r", desc="We saw other sharers") {
    TBEs[address].Sharers := true;
  }
817
  // Release this address's TBE; the transaction is finished.
  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }
821
822   action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    // Write an exclusively-held (M/MM) block from the TBE back to the home
    // directory (MI replacement path).  DataBlk and Dirty always come from
    // the TBE and are set once up front; only the response type and message
    // size depend on the dirty bit.  (Previously DataBlk was redundantly
    // re-assigned the same value inside both branches.)
823     enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
824       out_msg.Address := address;
825       out_msg.Sender := machineID;
826       out_msg.Destination.add(map_Address_to_Directory(address));
827       out_msg.DataBlk := TBEs[address].DataBlk;
828       out_msg.Dirty := TBEs[address].Dirty;
829       if (TBEs[address].Dirty) {
830         out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
832         out_msg.MessageSize := MessageSizeType:Writeback_Data;
833       } else {
834         out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
835         // NOTE: in a real system this would not send data. We send
836         // data here only so we can check it at the memory
838         out_msg.MessageSize := MessageSizeType:Writeback_Control;
839       }
840     }
841   }
842
843   action(u_writeDataToCache, "u", desc="Write data to cache") {
    // Install the data block and dirty bit carried by the incoming response
    // into the allocated cache entry.
844     peek(responseToCache_in, ResponseMsg) {
845       getCacheEntry(address).DataBlk := in_msg.DataBlk;
846       getCacheEntry(address).Dirty := in_msg.Dirty;
847     }
848   }
849
850   action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    // Same as u_writeDataToCache, but used when we already hold a copy (e.g.
    // an upgrade from S): the incoming data must match the cached data.
851     peek(responseToCache_in, ResponseMsg) {
852       DEBUG_EXPR(getCacheEntry(address).DataBlk);
853       DEBUG_EXPR(in_msg.DataBlk);
854       assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
855       getCacheEntry(address).DataBlk := in_msg.DataBlk;
      // Merge dirty bits: stay dirty if either the response or our copy was.
856       getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty;
857     }
858   }
859
860   action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    // The block lives in exactly one of the two L1s; free whichever holds it
    // (D-cache checked first).
861     if (L1DcacheMemory.isTagPresent(address)) {
862       L1DcacheMemory.deallocate(address);
863     } else {
864       L1IcacheMemory.deallocate(address);
865     }
866   }
867
868   action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    // Allocate an L1 D-cache entry for this address; no-op if already present.
869     if (L1DcacheMemory.isTagPresent(address) == false) {
870       L1DcacheMemory.allocate(address, new Entry);
871     }
872   }
873
874   action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    // Allocate an L1 I-cache entry for this address; no-op if already present.
875     if (L1IcacheMemory.isTagPresent(address) == false) {
876       L1IcacheMemory.allocate(address, new Entry);
877     }
878   }
879
880   action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    // Unconditionally allocate an L2 entry (caller guarantees a free way).
881     L2cacheMemory.allocate(address, new Entry);
882   }
883
884   action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    // Free the L2 entry for this address.
885     L2cacheMemory.deallocate(address);
886   }
887
888   action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
    // Move the block's data/dirty state down into the (already allocated) L2
    // entry, sourcing from whichever L1 currently holds the tag.
889     if (L1DcacheMemory.isTagPresent(address)) {
890       static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty;
891       static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk;
892     } else {
893       static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty;
894       static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk;
895     }
896   }
897
898   action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
    // Move the block's data/dirty state up into whichever L1 entry was just
    // allocated for it, and mark the entry as having come from the L2.
899     if (L1DcacheMemory.isTagPresent(address)) {
900       static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
901       static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
902       static_cast(Entry, L1DcacheMemory[address]).FromL2 := true;
903     } else {
904       static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
905       static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
906       static_cast(Entry, L1IcacheMemory[address]).FromL2 := true;
907     }
908   }
909
910   action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    // Record the demand miss against whichever L1 holds (an entry for) the
    // tag -- I-cache takes precedence -- and against the L2 only when the L2
    // does NOT hold the block (i.e. it was a real L2 miss too).
911     peek(mandatoryQueue_in, CacheMsg) {
912       if (L1IcacheMemory.isTagPresent(address)) {
913         L1IcacheMemory.profileMiss(in_msg);
914       } else if (L1DcacheMemory.isTagPresent(address)) {
915         L1DcacheMemory.profileMiss(in_msg);
916       }
917       if (L2cacheMemory.isTagPresent(address) == false) {
918         L2cacheMemory.profileMiss(in_msg);
919       }
920     }
921   }
922
923   action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    // Retry the CPU request later instead of servicing it now (stall).
924     mandatoryQueue_in.recycle();
925   }
926
927 //*****************************************************
928 // TRANSITIONS
929 //*****************************************************
930
931   // Transitions for Load/Store/L2_Replacement from transient states
  // While a transaction or an L2<->L1 move is in flight, new CPU requests and
  // replacements are recycled (retried later) rather than serviced.
932   transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
933     zz_recycleMandatoryQueue;
934   }
935
936   transition({M_W, MM_W}, {L2_Replacement}) {
937     zz_recycleMandatoryQueue;
938   }
939
940   transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
941     zz_recycleMandatoryQueue;
942   }
943
944   transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
945     zz_recycleMandatoryQueue;
946   }
947
  // External requests that hit a block mid L2->L1 transfer simply stall in
  // the forward queue until the transfer completes.
948   transition({IT, ST, OT, MT, MMT}, {Other_GETX, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
949     // stall
950   }
951
952   // Transitions moving data between the L1 and L2 caches
  // L1 victim: allocate an L2 entry, copy the block down, free the L1 entry.
953   transition({I, S, O, M, MM}, L1_to_L2) {
954     vv_allocateL2CacheBlock;
955     ss_copyFromL1toL2; // Not really needed for state I
956     gg_deallocateL1CacheBlock;
957   }
958
  // L2 -> L1D transfers: one transition per stable state X, moving to the
  // matching transient XT state until Complete_L2_to_L1 fires.  The demand
  // request that triggered the transfer is recycled and retried once the
  // block is resident in the L1.
959   transition(I, Trigger_L2_to_L1D, IT) {
960     ii_allocateL1DCacheBlock;
961     tt_copyFromL2toL1; // Not really needed for state I
962     uu_profileMiss;
963     rr_deallocateL2CacheBlock;
964     zz_recycleMandatoryQueue;
965     ll_L2toL1Transfer;
966   }
967
968   transition(S, Trigger_L2_to_L1D, ST) {
969     ii_allocateL1DCacheBlock;
970     tt_copyFromL2toL1;
971     uu_profileMiss;
972     rr_deallocateL2CacheBlock;
973     zz_recycleMandatoryQueue;
974     ll_L2toL1Transfer;
975   }
976
977   transition(O, Trigger_L2_to_L1D, OT) {
978     ii_allocateL1DCacheBlock;
979     tt_copyFromL2toL1;
980     uu_profileMiss;
981     rr_deallocateL2CacheBlock;
982     zz_recycleMandatoryQueue;
983     ll_L2toL1Transfer;
984   }
985
986   transition(M, Trigger_L2_to_L1D, MT) {
987     ii_allocateL1DCacheBlock;
988     tt_copyFromL2toL1;
989     uu_profileMiss;
990     rr_deallocateL2CacheBlock;
991     zz_recycleMandatoryQueue;
992     ll_L2toL1Transfer;
993   }
994
995   transition(MM, Trigger_L2_to_L1D, MMT) {
996     ii_allocateL1DCacheBlock;
997     tt_copyFromL2toL1;
998     uu_profileMiss;
999     rr_deallocateL2CacheBlock;
1000     zz_recycleMandatoryQueue;
1001     ll_L2toL1Transfer;
1002   }
1003
  // Same five transfers, but into the L1 I-cache for instruction fetches.
1004   transition(I, Trigger_L2_to_L1I, IT) {
1005     jj_allocateL1ICacheBlock;
1006     tt_copyFromL2toL1; // Not really needed for state I
1007     uu_profileMiss;
1008     rr_deallocateL2CacheBlock;
1009     zz_recycleMandatoryQueue;
1010     ll_L2toL1Transfer;
1011   }
1012
1013   transition(S, Trigger_L2_to_L1I, ST) {
1014     jj_allocateL1ICacheBlock;
1015     tt_copyFromL2toL1;
1016     uu_profileMiss;
1017     rr_deallocateL2CacheBlock;
1018     zz_recycleMandatoryQueue;
1019     ll_L2toL1Transfer;
1020   }
1021
1022   transition(O, Trigger_L2_to_L1I, OT) {
1023     jj_allocateL1ICacheBlock;
1024     tt_copyFromL2toL1;
1025     uu_profileMiss;
1026     rr_deallocateL2CacheBlock;
1027     zz_recycleMandatoryQueue;
1028     ll_L2toL1Transfer;
1029   }
1030
1031   transition(M, Trigger_L2_to_L1I, MT) {
1032     jj_allocateL1ICacheBlock;
1033     tt_copyFromL2toL1;
1034     uu_profileMiss;
1035     rr_deallocateL2CacheBlock;
1036     zz_recycleMandatoryQueue;
1037     ll_L2toL1Transfer;
1038   }
1039
1040   transition(MM, Trigger_L2_to_L1I, MMT) {
1041     jj_allocateL1ICacheBlock;
1042     tt_copyFromL2toL1;
1043     uu_profileMiss;
1044     rr_deallocateL2CacheBlock;
1045     zz_recycleMandatoryQueue;
1046     ll_L2toL1Transfer;
1047   }
1048
  // Transfer done: each transient XT state folds back to its stable state X.
1049   transition(IT, Complete_L2_to_L1, I) {
1050     j_popTriggerQueue;
1051   }
1052
1053   transition(ST, Complete_L2_to_L1, S) {
1054     j_popTriggerQueue;
1055   }
1056
1057   transition(OT, Complete_L2_to_L1, O) {
1058     j_popTriggerQueue;
1059   }
1060
1061   transition(MT, Complete_L2_to_L1, M) {
1062     j_popTriggerQueue;
1063   }
1064
1065   transition(MMT, Complete_L2_to_L1, MM) {
1066     j_popTriggerQueue;
1067   }
1068
1069   // Transitions from Idle
  // Misses from I: allocate the right L1 entry and a TBE, issue the request,
  // profile the miss, and move to the waiting state (IS for reads, IM for
  // writes).
1070   transition(I, Load, IS) {
1071     ii_allocateL1DCacheBlock;
1072     i_allocateTBE;
1073     a_issueGETS;
1074     uu_profileMiss;
1075     k_popMandatoryQueue;
1076   }
1077
1078   transition(I, Ifetch, IS) {
1079     jj_allocateL1ICacheBlock;
1080     i_allocateTBE;
1081     a_issueGETS;
1082     uu_profileMiss;
1083     k_popMandatoryQueue;
1084   }
1085
1086   transition(I, Store, IM) {
1087     ii_allocateL1DCacheBlock;
1088     i_allocateTBE;
1089     b_issueGETX;
1090     uu_profileMiss;
1091     k_popMandatoryQueue;
1092   }
1093
1094   transition(I, L2_Replacement) {
1095     rr_deallocateL2CacheBlock;
1096   }
1097
  // Invalid blocks still ack external requests (broadcast protocol).
1098   transition(I, {Other_GETX, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1099     f_sendAck;
1100     l_popForwardQueue;
1101   }
1102
1103   // Transitions from Shared
1104   transition({S, SM, ISM}, {Load, Ifetch}) {
1105     h_load_hit;
1106     k_popMandatoryQueue;
1107   }
1108
  // Upgrade miss: request exclusive ownership while keeping the shared copy.
1109   transition(S, Store, SM) {
1110     i_allocateTBE;
1111     b_issueGETX;
1112     uu_profileMiss;
1113     k_popMandatoryQueue;
1114   }
1115
1116   transition(S, L2_Replacement, I) {
1117     rr_deallocateL2CacheBlock;
1118   }
1119
1120   transition(S, {Other_GETX, Invalidate}, I) {
1121     f_sendAck;
1122     l_popForwardQueue;
1123   }
1124
  // A reader's request gets a shared ack so it learns other sharers exist.
1125   transition(S, {Other_GETS, Other_GETS_No_Mig}) {
1126     ff_sendAckShared;
1127     l_popForwardQueue;
1128   }
1129
1130   // Transitions from Owned
1131   transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
1132     h_load_hit;
1133     k_popMandatoryQueue;
1134   }
1135
1136   transition(O, Store, OM) {
1137     i_allocateTBE;
1138     b_issueGETX;
    // NOTE(review): the owner pre-decrements the expected-message count here,
    // presumably because it will not receive a response from itself -- confirm
    // against b_issueGETX / the directory's response counting.
1139     p_decrementNumberOfMessagesByOne;
1140     uu_profileMiss;
1141     k_popMandatoryQueue;
1142   }
1143
  // Replacing an owned block requires a writeback handshake (state OI).
1144   transition(O, L2_Replacement, OI) {
1145     i_allocateTBE;
1146     d_issuePUT;
1147     rr_deallocateL2CacheBlock;
1148   }
1149
1150   transition(O, {Other_GETX, Invalidate}, I) {
1151     e_sendData;
1152     l_popForwardQueue;
1153   }
1154
1155   transition(O, {Other_GETS, Other_GETS_No_Mig}) {
1156     ee_sendDataShared;
1157     l_popForwardQueue;
1158   }
1159
  // Merged GETS from multiple readers: one multicast data response.
1160   transition(O, Merged_GETS) {
1161     em_sendDataSharedMultiple;
1162     l_popForwardQueue;
1163   }
1164
1165   // Transitions from Modified
1166   transition(MM, {Load, Ifetch}) {
1167     h_load_hit;
1168     k_popMandatoryQueue;
1169   }
1170
1171   transition(MM, Store) {
1172     hh_store_hit;
1173     k_popMandatoryQueue;
1174   }
1175
1176   transition(MM, L2_Replacement, MI) {
1177     i_allocateTBE;
1178     d_issuePUT;
1179     rr_deallocateL2CacheBlock;
1180   }
1181
1182   transition(MM, {Other_GETX, Invalidate}, I) {
1183     c_sendExclusiveData;
1184     l_popForwardQueue;
1185   }
1186
  // Plain GETS migrates the whole block (send exclusive, drop to I); the
  // No_Mig variant instead shares the data and keeps the block as owner.
1187   transition(MM, Other_GETS, I) {
1188     c_sendExclusiveData;
1189     l_popForwardQueue;
1190   }
1191
1192   transition(MM, Other_GETS_No_Mig, O) {
1193     ee_sendDataShared;
1194     l_popForwardQueue;
1195   }
1196
1197   transition(MM, Merged_GETS, O) {
1198     em_sendDataSharedMultiple;
1199     l_popForwardQueue;
1200   }
1201
1202   // Transitions from Dirty Exclusive
1203   transition(M, {Load, Ifetch}) {
1204     h_load_hit;
1205     k_popMandatoryQueue;
1206   }
1207
1208   transition(M, Store, MM) {
1209     hh_store_hit;
1210     k_popMandatoryQueue;
1211   }
1212
1213   transition(M, L2_Replacement, MI) {
1214     i_allocateTBE;
1215     d_issuePUT;
1216     rr_deallocateL2CacheBlock;
1217   }
1218
1219   transition(M, {Other_GETX, Invalidate}, I) {
1220     c_sendExclusiveData;
1221     l_popForwardQueue;
1222   }
1223
  // Unlike MM, state M never migrates on a GETS: it always shares and
  // becomes owner.
1224   transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
1225     ee_sendDataShared;
1226     l_popForwardQueue;
1227   }
1228
1229   transition(M, Merged_GETS, O) {
1230     em_sendDataSharedMultiple;
1231     l_popForwardQueue;
1232   }
1233
1234   // Transitions from IM
1235
  // Waiting for exclusive data: ack external requests (we own nothing yet).
1236   transition(IM, {Other_GETX, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1237     f_sendAck;
1238     l_popForwardQueue;
1239   }
1240
1241   transition(IM, Ack) {
1242     m_decrementNumberOfMessages;
1243     o_checkForCompletion;
1244     n_popResponseQueue;
1245   }
1246
  // Data from an owner: still need remaining acks, so go to ISM.
1247   transition(IM, Data, ISM) {
1248     u_writeDataToCache;
1249     m_decrementNumberOfMessages;
1250     o_checkForCompletion;
1251     n_popResponseQueue;
1252   }
1253
  // Exclusive data: the store can complete immediately (MM_W waits only for
  // stragglers).
1254   transition(IM, Exclusive_Data, MM_W) {
1255     u_writeDataToCache;
1256     m_decrementNumberOfMessages;
1257     o_checkForCompletion;
1258     sx_external_store_hit;
1259     n_popResponseQueue;
1260   }
1261
1262   // Transitions from SM
1263   transition(SM, {Other_GETS, Other_GETS_No_Mig}) {
1264     ff_sendAckShared;
1265     l_popForwardQueue;
1266   }
1267
  // Our shared copy is invalidated mid-upgrade: fall back to the IM path.
1268   transition(SM, {Other_GETX, Invalidate}, IM) {
1269     f_sendAck;
1270     l_popForwardQueue;
1271   }
1272
1273   transition(SM, Ack) {
1274     m_decrementNumberOfMessages;
1275     o_checkForCompletion;
1276     n_popResponseQueue;
1277   }
1278
  // We already hold the data in S, so verify the arriving copy matches.
1279   transition(SM, Data, ISM) {
1280     v_writeDataToCacheVerify;
1281     m_decrementNumberOfMessages;
1282     o_checkForCompletion;
1283     n_popResponseQueue;
1284   }
1285
1286   // Transitions from ISM
1287   transition(ISM, Ack) {
1288     m_decrementNumberOfMessages;
1289     o_checkForCompletion;
1290     n_popResponseQueue;
1291   }
1292
  // All acks collected: perform the store, unblock the directory, finish.
1293   transition(ISM, All_acks_no_sharers, MM) {
1294     sxt_trig_ext_store_hit;
1295     gm_sendUnblockM;
1296     s_deallocateTBE;
1297     j_popTriggerQueue;
1298   }
1299
1300   // Transitions from OM
1301
  // We lose ownership mid-upgrade: hand the data over and expect one more
  // response before our own GETX completes (hence the increment).
1302   transition(OM, {Other_GETX, Invalidate}, IM) {
1303     e_sendData;
1304     pp_incrementNumberOfMessagesByOne;
1305     l_popForwardQueue;
1306   }
1307
1308   transition(OM, {Other_GETS, Other_GETS_No_Mig}) {
1309     ee_sendDataShared;
1310     l_popForwardQueue;
1311   }
1312
1313   transition(OM, Merged_GETS) {
1314     em_sendDataSharedMultiple;
1315     l_popForwardQueue;
1316   }
1317
1318   transition(OM, Ack) {
1319     m_decrementNumberOfMessages;
1320     o_checkForCompletion;
1321     n_popResponseQueue;
1322   }
1323
1324   transition(OM, {All_acks, All_acks_no_sharers}, MM) {
1325     sxt_trig_ext_store_hit;
1326     gm_sendUnblockM;
1327     s_deallocateTBE;
1328     j_popTriggerQueue;
1329   }
1330
1331   // Transitions from IS
1332
1333   transition(IS, {Other_GETX, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1334     f_sendAck;
1335     l_popForwardQueue;
1336   }
1337
1338   transition(IS, Ack) {
1339     m_decrementNumberOfMessages;
1340     o_checkForCompletion;
1341     n_popResponseQueue;
1342   }
1343
  // A sharer acked: record it so the final state reflects other sharers.
1344   transition(IS, Shared_Ack) {
1345     m_decrementNumberOfMessages;
1346     r_setSharerBit;
1347     o_checkForCompletion;
1348     n_popResponseQueue;
1349   }
1350
  // Data arrived; the load completes while remaining acks drain in SS.
1351   transition(IS, Data, SS) {
1352     u_writeDataToCache;
1353     m_decrementNumberOfMessages;
1354     o_checkForCompletion;
1355     hx_external_load_hit;
1356     uo_updateCurrentOwner;
1357     n_popResponseQueue;
1358   }
1359
  // Exclusive data on a read miss: no other copies, so head toward M.
1360   transition(IS, Exclusive_Data, M_W) {
1361     u_writeDataToCache;
1362     m_decrementNumberOfMessages;
1363     o_checkForCompletion;
1364     hx_external_load_hit;
1365     n_popResponseQueue;
1366   }
1367
1368   transition(IS, Shared_Data, SS) {
1369     u_writeDataToCache;
1370     r_setSharerBit;
1371     m_decrementNumberOfMessages;
1372     o_checkForCompletion;
1373     hx_external_load_hit;
1374     uo_updateCurrentOwner;
1375     n_popResponseQueue;
1376   }
1377
1378   // Transitions from SS
1379
  // Read data already delivered; just drain the remaining ack responses.
1380   transition(SS, Ack) {
1381     m_decrementNumberOfMessages;
1382     o_checkForCompletion;
1383     n_popResponseQueue;
1384   }
1385
1386   transition(SS, Shared_Ack) {
1387     m_decrementNumberOfMessages;
1388     r_setSharerBit;
1389     o_checkForCompletion;
1390     n_popResponseQueue;
1391   }
1392
1393   transition(SS, All_acks, S) {
1394     gs_sendUnblockS;
1395     s_deallocateTBE;
1396     j_popTriggerQueue;
1397   }
1398
1399   transition(SS, All_acks_no_sharers, S) {
1400     // Note: The directory might still be the owner, so that is why we go to S
1401     gs_sendUnblockS;
1402     s_deallocateTBE;
1403     j_popTriggerQueue;
1404   }
1405
1406   // Transitions from MM_W
1407
  // Store data already installed; stores hit locally while acks drain.
1408   transition(MM_W, Store) {
1409     hh_store_hit;
1410     k_popMandatoryQueue;
1411   }
1412
1413   transition(MM_W, Ack) {
1414     m_decrementNumberOfMessages;
1415     o_checkForCompletion;
1416     n_popResponseQueue;
1417   }
1418
1419   transition(MM_W, All_acks_no_sharers, MM) {
1420     gm_sendUnblockM;
1421     s_deallocateTBE;
1422     j_popTriggerQueue;
1423   }
1424
1425   // Transitions from M_W
1426
  // A store during the wait upgrades the pending final state from M to MM.
1427   transition(M_W, Store, MM_W) {
1428     hh_store_hit;
1429     k_popMandatoryQueue;
1430   }
1431
1432   transition(M_W, Ack) {
1433     m_decrementNumberOfMessages;
1434     o_checkForCompletion;
1435     n_popResponseQueue;
1436   }
1437
1438   transition(M_W, All_acks_no_sharers, M) {
1439     gm_sendUnblockM;
1440     s_deallocateTBE;
1441     j_popTriggerQueue;
1442   }
1443
1444   // Transitions from OI/MI
1445
  // Writeback in flight: forwarded requests are satisfied out of the TBE.
  // A GETX/Invalidate strips our obligation down to II; a GETS leaves us
  // still responsible for the data (OI).
1446   transition({OI, MI}, {Other_GETX, Invalidate}, II) {
1447     q_sendDataFromTBEToCache;
1448     l_popForwardQueue;
1449   }
1450
1451   transition({OI, MI}, {Other_GETS, Other_GETS_No_Mig}, OI) {
1452     q_sendDataFromTBEToCache;
1453     l_popForwardQueue;
1454   }
1455
1456   transition({OI, MI}, Merged_GETS, OI) {
1457     qm_sendDataFromTBEToCache;
1458     l_popForwardQueue;
1459   }
1460
  // Directory accepted the writeback: send the data and retire the TBE.
1461   transition(MI, Writeback_Ack, I) {
1462     t_sendExclusiveDataFromTBEToMemory;
1463     s_deallocateTBE;
1464     l_popForwardQueue;
1465   }
1466
1467   transition(OI, Writeback_Ack, I) {
1468     qq_sendDataFromTBEToMemory;
1469     s_deallocateTBE;
1470     l_popForwardQueue;
1471   }
1472
1473   // Transitions from II
  // Data already handed off to another cache; only acks remain.
1474   transition(II, {Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
1475     f_sendAck;
1476     l_popForwardQueue;
1477   }
1478
1479   transition(II, Writeback_Ack, I) {
1480     g_sendUnblock;
1481     s_deallocateTBE;
1482     l_popForwardQueue;
1483   }
1484
  // Writeback rejected (another request won the race): just clean up.
1485   transition(II, Writeback_Nack, I) {
1486     s_deallocateTBE;
1487     l_popForwardQueue;
1488   }
1489 }
1490