/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   int cache_response_latency = 10,
   int issue_latency = 2,
   int l2_cache_hit_latency = 10,
   bool no_mig_atomic = true
{
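
  // Configuration note (added commentary, not part of the protocol logic):
  // the three latencies above are presumably in Ruby cycles --
  // cache_response_latency delays responses sourced from this cache,
  // issue_latency delays requests sent toward the directory, and
  // l2_cache_hit_latency paces L2-to-L1 block transfers. no_mig_atomic
  // appears to disable migratory sharing for blocks touched by atomic
  // operations (see the AtomicAccessed checks below).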

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";


  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, desc="Idle";
    S, desc="Shared";
    O, desc="Owned";
    M, desc="Modified (dirty)";
    MM, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    ISM, "ISM", desc="Issued GetX, received data, waiting for all acks";
    M_W, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, "MM^W", desc="Issued GetX, received exclusive data";
    IS, "IS", desc="Issued GetS";
    SS, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, "OI", desc="Issued PutO, waiting for ack";
    MI, "MI", desc="Issued PutX, waiting for ack";
    II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, "IT", desc="Invalid block transferring to L1";
    ST, "ST", desc="S block transferring to L1";
    OT, "OT", desc="O block transferring to L1";
    MT, "MT", desc="M block transferring to L1";
    MMT, "MMT", desc="MM block transferring to L1";
  }
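
  // Naming note (added commentary): two-letter transient states read as
  // "came from X, heading toward Y" (e.g. SM = was Shared, issued a GetX
  // toward Modified), the ^W states are waiting to be unblocked after
  // receiving exclusive data, and the *T states mark blocks in flight
  // between the L2 and an L1 while the intra-cache transfer completes.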

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor to a block that should not migrate";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  }
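
  // Protocol note (added commentary): Merged_GETS presumably arrives when
  // the directory coalesces several outstanding GetS requests into one
  // forwarded probe; the owner then answers every merged requestor at once
  // (see em_sendDataSharedMultiple, which sends to in_msg.MergedRequestors).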

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was touched by an atomic access";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  Entry getCacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L2cacheMemory[addr]);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return static_cast(Entry, L1DcacheMemory[addr]);
    } else {
      return static_cast(Entry, L1IcacheMemory[addr]);
    }
  }
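
  // Lookup note (added commentary): the caches are exclusive, so at most
  // one of L2/L1D/L1I holds a given tag (getState asserts exactly this).
  // The lookup order here is therefore just a priority chain, with the
  // L1I arm doubling as the fall-through case.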

  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
    } else {
      return L1IcacheMemory.changePermission(addr, permission);
    }
  }

  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  }

  State getState(Address addr) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (isCacheTagPresent(addr)) {
      return getCacheEntry(addr).CacheState;
    }
    return State:I;
  }

  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (isCacheTagPresent(addr)) {
      getCacheEntry(addr).CacheState := state;

      // Set permission
      if ((state == State:MM) ||
          (state == State:MM_W)) {
        changePermission(addr, AccessPermission:Read_Write);
      } else if (state == State:S ||
                 state == State:O ||
                 state == State:M ||
                 state == State:M_W ||
                 state == State:SM ||
                 state == State:ISM ||
                 state == State:OM ||
                 state == State:SS) {
        changePermission(addr, AccessPermission:Read_Only);
      } else {
        changePermission(addr, AccessPermission:Invalid);
      }
    }
  }
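
  // Permission map (added commentary), as encoded above:
  //   MM, MM_W                      -> Read_Write (locally modified)
  //   S, O, M, M_W, SM, ISM, OM, SS -> Read_Only  (a readable copy exists)
  //   everything else               -> Invalid    (no usable copy)
  // Note that M maps to Read_Only: a store in M upgrades to MM first
  // (see transition(M, Store, MM) below), which is what distinguishes the
  // two modified flavors.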

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  GenericMachineType testAndClearLocalHit(Address addr) {
    if (getCacheEntry(addr).FromL2) {
      getCacheEntry(addr).FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }
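
  // Profiling note (added commentary): FromL2 is set when tt_copyFromL2toL1
  // fills an L1 from the local L2, so the first hit after such a fill is
  // charged to the L2 rather than the L1; the flag is consumed here so
  // subsequent hits count as ordinary L1 hits.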

  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (isCacheTagPresent(in_msg.Address)) {
              if (getCacheEntry(in_msg.Address).AtomicAccessed && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address);
              } else {
                trigger(Event:Other_GETS, in_msg.Address);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
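
  // Classification note (added commentary): a forwarded GETS is demoted to
  // NC_DMA_GETS when this is the only L1Cache machine in the system, i.e.
  // the request can only have come from a DMA engine, so no ownership
  // migration is at stake. With more than one cache, a GETS that hits an
  // AtomicAccessed block while no_mig_atomic is set becomes
  // Other_GETS_No_Mig, which keeps the block here instead of migrating it
  // (compare the MM, Other_GETS and MM, Other_GETS_No_Mig transitions
  // below).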

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Mandatory Queue
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
              trigger(Event:L1_to_L2, in_msg.LineAddress);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
            }
          }

          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
          } else {
            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)));
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
              trigger(Event:L1_to_L2, in_msg.LineAddress);
            } else {
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.LineAddress));
            }
          }

          if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
          } else {
            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.LineAddress)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)));
              }
            }
          }
        }
      }
    }
  }
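
  // Placement summary (added commentary). For a request to cache X
  // (X = L1I for ifetches, L1D otherwise), with Y the opposite L1:
  //   1. block in Y?        evict it toward the L2 first (L1_to_L2 if the
  //                         L2 has room, else L2_Replacement to make room)
  //   2. block in X?        proceed with the access
  //   3. X has a free way?  fill from the L2 (Trigger_L2_to_L1*) if the L2
  //                         holds it, else fetch from the system
  //   4. X is full?         push X's victim to the L2, evicting an L2
  //                         victim first if the L2 is also full
  // This keeps the hierarchy strictly exclusive: a block lives in exactly
  // one of L1I, L1D, or L2.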

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      TBEs[address].NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }
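
  // Worked example (added commentary): in a 4-core system,
  // machineCount(MachineType:L1Cache) == 4, so NumPendingMsgs starts at 4:
  // one ack or data message from each of the 3 peer caches plus one
  // response on behalf of memory. m_decrementNumberOfMessages subtracts
  // in_msg.Acks per response, and o_checkForCompletion fires the ALL_ACKS*
  // trigger once the count reaches zero.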

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
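
  // Ack-weight note (added commentary, an interpretation): on a broadcast
  // probe the owner's data response carries Acks := 2, apparently covering
  // both its own ack and the memory response the directory suppresses when
  // a cache owns the block, so the requestor still collects machineCount
  // total. On a directed probe only the owner is probed, so its single
  // response must carry all machineCount acks by itself.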

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Dirty := getCacheEntry(address).Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := TBEs[address].CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);

    sequencer.readCallback(address,
                           testAndClearLocalHit(address),
                           getCacheEntry(address).DataBlk);

  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             getCacheEntry(address).DataBlk,
                             TBEs[address].InitialRequestTime,
                             TBEs[address].ForwardRequestTime,
                             TBEs[address].FirstResponseTime);

    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address,
                              testAndClearLocalHit(address),
                              getCacheEntry(address).DataBlk);

      getCacheEntry(address).Dirty := true;
      if (in_msg.Type == CacheRequestType:ATOMIC) {
        getCacheEntry(address).AtomicAccessed := true;
      }
    }
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              getCacheEntry(address).DataBlk,
                              TBEs[address].InitialRequestTime,
                              TBEs[address].ForwardRequestTime,
                              TBEs[address].FirstResponseTime);

    }
    getCacheEntry(address).Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    DPRINTF(RubySlicc, "%s\n", getCacheEntry(address).DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address,
                                                    TBEs[address].LastResponder),
                            getCacheEntry(address).DataBlk,
                            TBEs[address].InitialRequestTime,
                            TBEs[address].ForwardRequestTime,
                            TBEs[address].FirstResponseTime);

    getCacheEntry(address).Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
    TBEs[address].Dirty := getCacheEntry(address).Dirty;
    TBEs[address].Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", TBEs[address].NumPendingMsgs);
      TBEs[address].LastResponder := in_msg.Sender;
      if (TBEs[address].InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(TBEs[address].InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        TBEs[address].InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (TBEs[address].ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(TBEs[address].ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        TBEs[address].ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (TBEs[address].FirstResponseTime == zero_time()) {
        TBEs[address].FirstResponseTime := get_time();
      }
    }
  }
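
  // Timing bookkeeping (added commentary): responses echo the request's
  // InitialRequestTime and the directory's ForwardRequestTime; the asserts
  // above check that every response for a given request agrees on those
  // stamps. FirstResponseTime latches only once, so the three timestamps
  // let the sequencer break a miss into issue, forward, and response
  // latency components.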

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      TBEs[address].CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule completion of the L2-to-L1 transfer after the L2 hit latency") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (TBEs[address].Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }
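
  // Completion note (added commentary): the Sharers bit is set by
  // r_setSharerBit whenever a Shared_Ack or Shared_Data arrives, so the
  // zero-pending trigger distinguishes All_acks (another cache kept a
  // copy) from All_acks_no_sharers (no other cache has the block), which
  // the transitions below treat differently.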

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    TBEs[address].Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := TBEs[address].DataBlk;
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              getCacheEntry(address).DataBlk, in_msg.DataBlk);
      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty || getCacheEntry(address).Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address, new Entry);
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address, new Entry);
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address, new Entry);
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  }

  action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
    if (L1DcacheMemory.isTagPresent(address)) {
      static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty;
      static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk;
    } else {
      static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty;
      static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk;
    }
  }

  action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
    if (L1DcacheMemory.isTagPresent(address)) {
      static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
      static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
      static_cast(Entry, L1DcacheMemory[address]).FromL2 := true;
    } else {
      static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
      static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
      static_cast(Entry, L1IcacheMemory[address]).FromL2 := true;
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
    zz_recycleMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    // stall
  }
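
  // Stall-policy note (added commentary): processor-side requests that hit
  // a transient state are recycled (zz_recycleMandatoryQueue puts them back
  // at the tail of the mandatory queue for a later retry), while external
  // probes that hit an in-flight L2-to-L1 transfer are simply stalled in
  // place until the Complete_L2_to_L1 trigger lands the block.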

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    vv_allocateL2CacheBlock;
    ss_copyFromL1toL2; // Not really needed for state I
    gg_deallocateL1CacheBlock;
  }

  transition(I, Trigger_L2_to_L1D, IT) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1; // Not really needed for state I
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1; // Not really needed for state I
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1;
    uu_profileMiss;
    rr_deallocateL2CacheBlock;
    zz_recycleMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, I) {
    j_popTriggerQueue;
  }

  transition(ST, Complete_L2_to_L1, S) {
    j_popTriggerQueue;
  }

  transition(OT, Complete_L2_to_L1, O) {
    j_popTriggerQueue;
  }

  transition(MT, Complete_L2_to_L1, M) {
    j_popTriggerQueue;
  }

  transition(MMT, Complete_L2_to_L1, MM) {
    j_popTriggerQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM, ISM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition(MM, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
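
  // Migratory-sharing note (added commentary): MM gives the whole block
  // away on Other_GETS (ending in I), the classic migratory optimization
  // for read-modify-write sharing; Other_GETS_No_Mig instead downgrades to
  // O and shares the data, so atomically-accessed blocks stay put when
  // no_mig_atomic is enabled.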

  // Transitions from Dirty Exclusive
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Data, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition(ISM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
  }

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
  }

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  // Transitions from II
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
  }
}