/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   int cache_response_latency = 10,
   int issue_latency = 2,
   int l2_cache_hit_latency = 10,
   bool no_mig_atomic = true
{

  // NETWORK BUFFERS
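  // Each message class travels on its own virtual network so that, for
  // example, responses are never blocked behind requests; this is the
  // usual Ruby deadlock-avoidance discipline.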
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";


  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor for a block that was atomically accessed and so must not migrate";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

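  // The three caches are kept mutually exclusive (see the assertions in
  // setState below), so at most one of the lookups here can hit; probe
  // the L2 first, then the two L1s.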
  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **
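  //
  // In_port rank gives the service priority: higher-rank queues (the
  // trigger queue, then responses) are polled before lower-rank ones
  // (forwarded requests, and the mandatory queue last).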

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
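          // With only one L1Cache machine in the system, a forwarded GETS
          // can only have come from the DMA controller, so there is no
          // migration decision to make.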
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
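  //
  // Because the hierarchy is exclusive, a miss may first require making
  // room: moving a conflicting block from the L1 into the L2 (L1_to_L2),
  // evicting an L2 victim (L2_Replacement), or pulling the block up from
  // the L2 into the requesting L1 (Trigger_L2_to_L1D/I).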
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS
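  // Within an action, SLICC implicitly binds the variables address,
  // cache_entry, and tbe from the trigger() call that fired the
  // transition; in_msg and out_msg are bound inside peek and enqueue
  // blocks respectively.
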
  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
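        // A directed probe was forwarded only to us, so this one response
        // must supply every ack the requestor is waiting for (its
        // NumPendingMsgs was initialized to machineCount); otherwise this
        // response counts as two acks.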
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == CacheRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
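      // SilentAcks are acks the directory suppressed (full-bit vector
      // mode): nodes known not to hold the block never respond, so their
      // count is folded into the pending-message count exactly once.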
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule the completion of an L2 to L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wake_up_dependents(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wake_up_all_dependents();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************
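  //
  // Syntax reminder: transition(initial_state(s), event(s)[, final_state])
  // lists the actions executed, in order, when the event fires; when the
  // final state is omitted, the machine stays in its current state.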

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    // stall
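    // With no pop action, the forwarded request stays at the head of its
    // queue and is re-examined until the L2-to-L1 transfer completes.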
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
    ka_wakeUpAllDependents;
  }

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, I) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, S) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, O) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, M) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MM) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM, ISM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition(MM, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SM
  transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition(ISM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }
}