2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
3 * Copyright (c) 2009 Advanced Micro Devices, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * AMD's contributions to the MOESI hammer protocol do not constitute an
30 * endorsement of its similarity to any AMD products.
32 * Authors: Milo Martin
// Machine definition: processor-side (Sequencer-facing) cache controller
// implementing AMD Hammer-like broadcast MOESI.  A single controller
// manages L1I, L1D and a private L2; the three caches are mutually
// exclusive for any given line (enforced by assertions in setState).
36 machine(L1Cache, "AMD Hammer-like protocol")
37 : Sequencer * sequencer;
38 CacheMemory * L1Icache;
39 CacheMemory * L1Dcache;
40 CacheMemory * L2cache;
// Cycles to source a response (data/ack) from this cache.
41 Cycles cache_response_latency := 10;
// Cycles to inject a new request into the network.
42 Cycles issue_latency := 2;
43 Cycles l2_cache_hit_latency := 10;
// When true, a line that was accessed atomically is not migrated on an
// incoming GETS (see Other_GETS_No_Mig in forwardToCache_in).
44 bool no_mig_atomic := "True";
// Outbound virtual networks: requests, responses, unblocks.
48 MessageBuffer * requestFromCache, network="To", virtual_network="2",
50 MessageBuffer * responseFromCache, network="To", virtual_network="4",
52 MessageBuffer * unblockFromCache, network="To", virtual_network="5",
// Inbound virtual networks: forwarded probes and responses.
55 MessageBuffer * forwardToCache, network="From", virtual_network="3",
57 MessageBuffer * responseToCache, network="From", virtual_network="4",
// Processor requests from the attached Sequencer.
60 MessageBuffer * mandatoryQueue;
// Self-scheduled internal events (all-acks, L2-to-L1 completion).
62 MessageBuffer * triggerQueue;
// Cache line states.  Base MOESI states (I/S/O/M/MM), "locked" variants
// (IR..MMR) used while servicing the mandatory queue, transient states
// for outstanding requests, and flush-related transients.
65 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
67 I, AccessPermission:Invalid, desc="Idle";
68 S, AccessPermission:Read_Only, desc="Shared";
69 O, AccessPermission:Read_Only, desc="Owned";
70 M, AccessPermission:Read_Only, desc="Modified (dirty)";
71 MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
73 // Base states, locked and ready to service the mandatory queue
74 IR, AccessPermission:Invalid, desc="Idle";
75 SR, AccessPermission:Read_Only, desc="Shared";
76 OR, AccessPermission:Read_Only, desc="Owned";
77 MR, AccessPermission:Read_Only, desc="Modified (dirty)";
78 MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
// Transient states for outstanding GETX/GETS requests.
81 IM, AccessPermission:Busy, "IM", desc="Issued GetX";
82 SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
83 OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
84 ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
85 M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
86 MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
87 IS, AccessPermission:Busy, "IS", desc="Issued GetS";
88 SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
// Writeback transients (PutX/PutO outstanding).
89 OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
90 MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
91 II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
// Transients for intra-controller L2-to-L1 block movement.
92 ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
93 OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
94 MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
// NOTE(review): desc previously said "to L0" — this controller has no
// L0; matches the sibling ST/OT/MT descriptions ("transferring to L1").
95 MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
97 //Transition States Related to Flushing
98 MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
99 MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
100 IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
101 ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
102 SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
103 OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
104 MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
// Events that drive the transition table: processor-side requests,
// internal cache-hierarchy transfers, external (forwarded) requests,
// responses, writeback acks, trigger-queue completions, and flushes.
108 enumeration(Event, desc="Cache events") {
// From the mandatory queue (processor).
109 Load, desc="Load request from the processor";
110 Ifetch, desc="I-fetch request from the processor";
111 Store, desc="Store request from the processor";
// Internal L1<->L2 movement (the caches are exclusive).
112 L2_Replacement, desc="L2 Replacement";
113 L1_to_L2, desc="L1 to L2 transfer";
114 Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
115 Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
116 Complete_L2_to_L1, desc="L2 to L1 transfer completed";
// Forwarded requests from other caches / the directory.
119 Other_GETX, desc="A GetX from another processor";
120 Other_GETS, desc="A GetS from another processor";
121 Merged_GETS, desc="A Merged GetS from another processor";
122 Other_GETS_No_Mig, desc="A GetS from another processor";
123 NC_DMA_GETS, desc="special GetS when only DMA exists";
124 Invalidate, desc="Invalidate block";
// Responses to our own outstanding requests.
127 Ack, desc="Received an ack message";
128 Shared_Ack, desc="Received an ack message, responder has a shared copy";
129 Data, desc="Received a data message";
130 Shared_Data, desc="Received a data message, responder has a shared copy";
131 Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";
132 Writeback_Ack, desc="Writeback O.K. from directory";
134 Writeback_Nack, desc="Writeback not O.K. from directory";
// Trigger-queue completions.
137 All_acks, desc="Received all required data and message acks";
138 All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
// Flush support.
141 Flush_line, desc="flush the cache line from all caches";
142 Block_Ack, desc="the directory is blocked and ready for the flush";
145 // STRUCTURE DEFINITIONS
// Per-line cache entry: protocol state, dirty bit, data, plus two
// flags used for local-hit accounting and atomic-migration control.
147 structure(Entry, desc="...", interface="AbstractCacheEntry") {
148 State CacheState, desc="cache state";
149 bool Dirty, desc="Is the data dirty (different than memory)?";
150 DataBlock DataBlk, desc="data for the block";
// Cleared by testAndClearLocalHit; distinguishes L2-sourced hits.
151 bool FromL2, default="false", desc="block just moved from L2";
// NOTE(review): desc previously duplicated FromL2's text ("block just
// moved from L2") — this flag is set on ATOMIC stores (see hh_store_hit)
// and consulted by IsAtomicAccessed to suppress migration.
152 bool AtomicAccessed, default="false", desc="block was accessed atomically";
// Transaction Buffer Entry: per-address bookkeeping for an outstanding
// request or concurrent writeback.
156 structure(TBE, desc="...") {
157 State TBEState, desc="Transient state";
158 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
159 bool Dirty, desc="Is the data dirty (different than memory)?";
// Counts down as acks/data arrive; All_acks fires when it reaches zero.
160 int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
161 bool Sharers, desc="On a GetS, did we find any other sharers in the system";
162 bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
163 MachineID LastResponder, desc="last machine to send a response for this request";
164 MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
// Timestamps for latency profiling (propagated into callbacks).
166 Cycles InitialRequestTime, default="Cycles(0)",
167 desc="time the initial requests was sent from the L1Cache";
168 Cycles ForwardRequestTime, default="Cycles(0)",
169 desc="time the dir forwarded the request";
170 Cycles FirstResponseTime, default="Cycles(0)",
171 desc="the time the first response was received";
// TBE table (implemented in C++, external to SLICC) and helper
// prototypes provided by the generated AbstractController base.
174 structure(TBETable, external="yes") {
177 void deallocate(Addr);
178 bool isPresent(Addr);
181 TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
184 void set_cache_entry(AbstractCacheEntry b);
185 void unset_cache_entry();
// Re-examine stalled input buffers (used after protocol state changes).
188 void wakeUpAllBuffers();
189 void wakeUpBuffers(Addr a);
// Look an address up across all three caches, L2 first then L1D then
// L1I.  Exclusion guarantees at most one of them holds the line, so
// the first valid hit is returned (L1I lookup may return invalid).
192 Entry getCacheEntry(Addr address), return_by_pointer="yes" {
193 Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
194 if(is_valid(L2cache_entry)) {
195 return L2cache_entry;
198 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
199 if(is_valid(L1Dcache_entry)) {
200 return L1Dcache_entry;
203 Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
204 return L1Icache_entry;
// Functional (simulator backdoor) read: prefer cache data, fall back
// to an in-flight TBE copy, otherwise the block is genuinely absent.
207 void functionalRead(Addr addr, Packet *pkt) {
208 Entry cache_entry := getCacheEntry(addr);
209 if(is_valid(cache_entry)) {
210 testAndRead(addr, cache_entry.DataBlk, pkt);
212 TBE tbe := TBEs[addr];
214 testAndRead(addr, tbe.DataBlk, pkt);
216 error("Missing data block");
// Functional write: update the cache copy if present, else any TBE
// copy.  Returns the number of bytes/blocks actually written so the
// caller can verify the write landed somewhere.
221 int functionalWrite(Addr addr, Packet *pkt) {
222 int num_functional_writes := 0;
224 Entry cache_entry := getCacheEntry(addr);
225 if(is_valid(cache_entry)) {
226 num_functional_writes := num_functional_writes +
227 testAndWrite(addr, cache_entry.DataBlk, pkt);
228 return num_functional_writes;
231 TBE tbe := TBEs[addr];
232 num_functional_writes := num_functional_writes +
233 testAndWrite(addr, tbe.DataBlk, pkt);
234 return num_functional_writes;
// Single-cache lookup helpers; each returns the (possibly invalid)
// entry pointer for exactly one cache, unlike getCacheEntry above.
237 Entry getL2CacheEntry(Addr address), return_by_pointer="yes" {
238 Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
239 return L2cache_entry;
242 Entry getL1DCacheEntry(Addr address), return_by_pointer="yes" {
243 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
244 return L1Dcache_entry;
247 Entry getL1ICacheEntry(Addr address), return_by_pointer="yes" {
248 Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
249 return L1Icache_entry;
// Current protocol state for an address: a valid TBE's transient state
// takes precedence over the stable cache-entry state.
252 State getState(TBE tbe, Entry cache_entry, Addr addr) {
255 } else if (is_valid(cache_entry)) {
256 return cache_entry.CacheState;
// Record a new state in both the TBE (if any) and the cache entry (if
// any).  The assertions enforce the L1I/L1D/L2 exclusion invariant:
// no address may be resident in more than one of the three caches.
261 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
262 assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
263 assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
264 assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
267 tbe.TBEState := state;
270 if (is_valid(cache_entry)) {
271 cache_entry.CacheState := state;
// Access permission for functional/backdoor accesses: derived from the
// TBE's transient state when one exists, else from the cache entry,
// else the address is simply not present here.
275 AccessPermission getAccessPermission(Addr addr) {
276 TBE tbe := TBEs[addr];
278 return L1Cache_State_to_permission(tbe.TBEState);
281 Entry cache_entry := getCacheEntry(addr);
282 if(is_valid(cache_entry)) {
283 return L1Cache_State_to_permission(cache_entry.CacheState);
286 return AccessPermission:NotPresent;
// Push the permission implied by 'state' down into the entry itself.
289 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
290 if (is_valid(cache_entry)) {
291 cache_entry.changePermission(L1Cache_State_to_permission(state));
// Map a Sequencer request type onto the protocol event that services
// it; ATOMIC is handled like a store.  Unknown types are fatal.
295 Event mandatory_request_type_to_event(RubyRequestType type) {
296 if (type == RubyRequestType:LD) {
298 } else if (type == RubyRequestType:IFETCH) {
300 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
302 } else if ((type == RubyRequestType:FLUSH)) {
303 return Event:Flush_line;
305 error("Invalid RubyRequestType");
// Report which level satisfied a hit for latency accounting: L2Cache
// if the block just migrated up from L2 (consuming the FromL2 flag),
// otherwise L1Cache.
309 MachineType testAndClearLocalHit(Entry cache_entry) {
310 if (is_valid(cache_entry) && cache_entry.FromL2) {
311 cache_entry.FromL2 := false;
312 return MachineType:L2Cache;
314 return MachineType:L1Cache;
// True when this line was touched by an ATOMIC store; used together
// with no_mig_atomic to keep such lines from migrating on GETS.
317 bool IsAtomicAccessed(Entry cache_entry) {
318 assert(is_valid(cache_entry));
319 return cache_entry.AtomicAccessed;
// Output ports onto the declared message buffers.
323 out_port(requestNetwork_out, RequestMsg, requestFromCache);
324 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
325 out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
326 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
// Highest-priority in_port (rank=3): internally generated triggers —
// L2-to-L1 transfer completion and all-acks-received notifications.
331 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
332 if (triggerQueue_in.isReady(clockEdge())) {
333 peek(triggerQueue_in, TriggerMsg) {
335 Entry cache_entry := getCacheEntry(in_msg.addr);
336 TBE tbe := TBEs[in_msg.addr];
338 if (in_msg.Type == TriggerType:L2_to_L1) {
339 trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
340 } else if (in_msg.Type == TriggerType:ALL_ACKS) {
341 trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
342 } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
343 trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
345 error("Unexpected message");
351 // Nothing from the unblock network
// Responses (rank=2) to our outstanding requests: plain/shared acks and
// plain/shared/exclusive data.  block_on="addr" stalls other messages
// for the same address while one is being processed.
354 in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
355 if (responseToCache_in.isReady(clockEdge())) {
356 peek(responseToCache_in, ResponseMsg, block_on="addr") {
358 Entry cache_entry := getCacheEntry(in_msg.addr);
359 TBE tbe := TBEs[in_msg.addr];
361 if (in_msg.Type == CoherenceResponseType:ACK) {
362 trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
363 } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
364 trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
365 } else if (in_msg.Type == CoherenceResponseType:DATA) {
366 trigger(Event:Data, in_msg.addr, cache_entry, tbe);
367 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
368 trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
369 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
370 trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
372 error("Unexpected message");
// Forwarded requests (rank=1) from the directory/other caches.  GETF is
// treated like GETX.  For GETS, the event depends on: (a) whether other
// L1s even exist (single-cache systems see only DMA, hence NC_DMA_GETS),
// and (b) whether the line was atomically accessed with no_mig_atomic
// set, which suppresses migration (Other_GETS_No_Mig).
379 in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
380 if (forwardToCache_in.isReady(clockEdge())) {
381 peek(forwardToCache_in, RequestMsg, block_on="addr") {
383 Entry cache_entry := getCacheEntry(in_msg.addr);
384 TBE tbe := TBEs[in_msg.addr];
386 if ((in_msg.Type == CoherenceRequestType:GETX) ||
387 (in_msg.Type == CoherenceRequestType:GETF)) {
388 trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
389 } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
390 trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
391 } else if (in_msg.Type == CoherenceRequestType:GETS) {
392 if (machineCount(MachineType:L1Cache) > 1) {
393 if (is_valid(cache_entry)) {
394 if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
395 trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
397 trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
400 trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
403 trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
405 } else if (in_msg.Type == CoherenceRequestType:INV) {
406 trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
407 } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
408 trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
409 } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
410 trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
411 } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
412 trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
414 error("Unexpected message");
420 // Nothing from the request network
// Lowest-priority in_port (rank=0): processor requests.  The two long
// symmetric arms (IFETCH vs data access) implement the exclusive
// hierarchy placement policy:
//   1. hit in the correct L1  -> service directly;
//   2. line in the WRONG L1   -> evict it to L2 (L1_to_L2), making room
//      in L2 first via L2_Replacement if necessary;
//   3. correct L1 has room    -> pull from L2 if present, else miss;
//   4. correct L1 is full     -> push an L1 victim down to L2 (again
//      possibly forcing an L2_Replacement first).
423 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
424 if (mandatoryQueue_in.isReady(clockEdge())) {
425 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
427 // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
428 TBE tbe := TBEs[in_msg.LineAddress];
430 if (in_msg.Type == RubyRequestType:IFETCH) {
431 // ** INSTRUCTION ACCESS ***
433 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
434 if (is_valid(L1Icache_entry)) {
435 // The tag matches for the L1, so the L1 fetches the line.
436 // We know it can't be in the L2 due to exclusion
437 trigger(mandatory_request_type_to_event(in_msg.Type),
438 in_msg.LineAddress, L1Icache_entry, tbe);
440 // Check to see if it is in the OTHER L1
441 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
442 if (is_valid(L1Dcache_entry)) {
443 // The block is in the wrong L1, try to write it to the L2
444 if (L2cache.cacheAvail(in_msg.LineAddress)) {
445 trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
447 Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
448 trigger(Event:L2_Replacement,
450 getL2CacheEntry(l2_victim_addr),
451 TBEs[l2_victim_addr]);
455 if (L1Icache.cacheAvail(in_msg.LineAddress)) {
456 // L1 doesn't have the line, but we have space for it in the L1
458 Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
459 if (is_valid(L2cache_entry)) {
460 // L2 has it (maybe not with the right permissions)
461 trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
464 // We have room, the L2 doesn't have it, so the L1 fetches the line
465 trigger(mandatory_request_type_to_event(in_msg.Type),
466 in_msg.LineAddress, L1Icache_entry, tbe);
469 // No room in the L1, so we need to make room
470 Addr l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
471 if (L2cache.cacheAvail(l1i_victim_addr)) {
472 // The L2 has room, so we move the line from the L1 to the L2
473 trigger(Event:L1_to_L2,
475 getL1ICacheEntry(l1i_victim_addr),
476 TBEs[l1i_victim_addr]);
478 Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
479 // The L2 does not have room, so we replace a line from the L2
480 trigger(Event:L2_Replacement,
482 getL2CacheEntry(l2_victim_addr),
483 TBEs[l2_victim_addr]);
488 // *** DATA ACCESS ***
490 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
491 if (is_valid(L1Dcache_entry)) {
492 // The tag matches for the L1, so the L1 fetches the line.
493 // We know it can't be in the L2 due to exclusion
494 trigger(mandatory_request_type_to_event(in_msg.Type),
495 in_msg.LineAddress, L1Dcache_entry, tbe);
498 // Check to see if it is in the OTHER L1
499 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
500 if (is_valid(L1Icache_entry)) {
501 // The block is in the wrong L1, try to write it to the L2
502 if (L2cache.cacheAvail(in_msg.LineAddress)) {
503 trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
505 Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
506 trigger(Event:L2_Replacement,
508 getL2CacheEntry(l2_victim_addr),
509 TBEs[l2_victim_addr]);
513 if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
514 // L1 doesn't have the line, but we have space for it in the L1
515 Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
516 if (is_valid(L2cache_entry)) {
517 // L2 has it (maybe not with the right permissions)
518 trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
521 // We have room, the L2 doesn't have it, so the L1 fetches the line
522 trigger(mandatory_request_type_to_event(in_msg.Type),
523 in_msg.LineAddress, L1Dcache_entry, tbe);
526 // No room in the L1, so we need to make room
527 Addr l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
528 if (L2cache.cacheAvail(l1d_victim_addr)) {
529 // The L2 has room, so we move the line from the L1 to the L2
530 trigger(Event:L1_to_L2,
532 getL1DCacheEntry(l1d_victim_addr),
533 TBEs[l1d_victim_addr]);
535 Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
536 // The L2 does not have room, so we replace a line from the L2
537 trigger(Event:L2_Replacement,
539 getL2CacheEntry(l2_victim_addr),
540 TBEs[l2_victim_addr]);
// Broadcast a GETS to the directory; expect a response from every
// other cache plus memory, so pre-load the pending-message count.
551 action(a_issueGETS, "a", desc="Issue GETS") {
552 enqueue(requestNetwork_out, RequestMsg, issue_latency) {
553 assert(is_valid(tbe));
554 out_msg.addr := address;
555 out_msg.Type := CoherenceRequestType:GETS;
556 out_msg.Requestor := machineID;
557 out_msg.Destination.add(map_Address_to_Directory(address));
558 out_msg.MessageSize := MessageSizeType:Request_Control;
559 out_msg.InitialRequestTime := curCycle();
561 // One from each other cache (n-1) plus the memory (+1)
562 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
// Issue a GETX (exclusive request) to the directory; same pending-ack
// accounting as GETS.
566 action(b_issueGETX, "b", desc="Issue GETX") {
567 enqueue(requestNetwork_out, RequestMsg, issue_latency) {
568 assert(is_valid(tbe));
569 out_msg.addr := address;
570 out_msg.Type := CoherenceRequestType:GETX;
571 out_msg.Requestor := machineID;
572 out_msg.Destination.add(map_Address_to_Directory(address));
573 out_msg.MessageSize := MessageSizeType:Request_Control;
574 out_msg.InitialRequestTime := curCycle();
576 // One from each other cache (n-1) plus the memory (+1)
577 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
// Like b_issueGETX, but skip the network request entirely when this is
// the only cache in the system (no one else to invalidate).
581 action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
582 if (machineCount(MachineType:L1Cache) > 1) {
583 enqueue(requestNetwork_out, RequestMsg, issue_latency) {
584 assert(is_valid(tbe));
585 out_msg.addr := address;
586 out_msg.Type := CoherenceRequestType:GETX;
587 out_msg.Requestor := machineID;
588 out_msg.Destination.add(map_Address_to_Directory(address));
589 out_msg.MessageSize := MessageSizeType:Request_Control;
590 out_msg.InitialRequestTime := curCycle();
594 // One from each other cache (n-1) plus the memory (+1)
595 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
// Issue a GETF (flush variant of GETX) to the directory.
598 action(bf_issueGETF, "bf", desc="Issue GETF") {
599 enqueue(requestNetwork_out, RequestMsg, issue_latency) {
600 assert(is_valid(tbe));
601 out_msg.addr := address;
602 out_msg.Type := CoherenceRequestType:GETF;
603 out_msg.Requestor := machineID;
604 out_msg.Destination.add(map_Address_to_Directory(address));
605 out_msg.MessageSize := MessageSizeType:Request_Control;
606 out_msg.InitialRequestTime := curCycle();
608 // One from each other cache (n-1) plus the memory (+1)
609 tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
// Respond to a forwarded request with DATA_EXCLUSIVE from the cache,
// transferring ownership.  On a directed probe the data message itself
// carries all acks; timestamps are echoed for latency profiling.
613 action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
614 peek(forwardToCache_in, RequestMsg) {
615 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
616 assert(is_valid(cache_entry));
617 out_msg.addr := address;
618 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
619 out_msg.Sender := machineID;
620 out_msg.Destination.add(in_msg.Requestor);
621 out_msg.DataBlk := cache_entry.DataBlk;
622 out_msg.Dirty := cache_entry.Dirty;
623 if (in_msg.DirectedProbe) {
624 out_msg.Acks := machineCount(MachineType:L1Cache);
628 out_msg.SilentAcks := in_msg.SilentAcks;
629 out_msg.MessageSize := MessageSizeType:Response_Data;
630 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
631 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Same as c_sendExclusiveData but sourced from the TBE copy — used
// when the line is mid-writeback and no cache entry exists.
636 action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
637 peek(forwardToCache_in, RequestMsg) {
638 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
639 assert(is_valid(tbe));
640 out_msg.addr := address;
641 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
642 out_msg.Sender := machineID;
643 out_msg.Destination.add(in_msg.Requestor);
644 out_msg.DataBlk := tbe.DataBlk;
645 out_msg.Dirty := tbe.Dirty;
646 if (in_msg.DirectedProbe) {
647 out_msg.Acks := machineCount(MachineType:L1Cache);
651 out_msg.SilentAcks := in_msg.SilentAcks;
652 out_msg.MessageSize := MessageSizeType:Response_Data;
653 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
654 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Start a writeback: send PUT to the directory (control only; the data
// follows after the directory's WB_ACK).
659 action(d_issuePUT, "d", desc="Issue PUT") {
660 enqueue(requestNetwork_out, RequestMsg, issue_latency) {
661 out_msg.addr := address;
662 out_msg.Type := CoherenceRequestType:PUT;
663 out_msg.Requestor := machineID;
664 out_msg.Destination.add(map_Address_to_Directory(address));
665 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Flush-initiated writeback request (PUTF) to the directory.
669 action(df_issuePUTF, "df", desc="Issue PUTF") {
670 enqueue(requestNetwork_out, RequestMsg, issue_latency) {
671 out_msg.addr := address;
672 out_msg.Type := CoherenceRequestType:PUTF;
673 out_msg.Requestor := machineID;
674 out_msg.Destination.add(map_Address_to_Directory(address));
675 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Respond with plain DATA (ownership given up) from the cache to the
// forwarded requestor; directed probes bundle the full ack count.
679 action(e_sendData, "e", desc="Send data from cache to requestor") {
680 peek(forwardToCache_in, RequestMsg) {
681 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
682 assert(is_valid(cache_entry));
683 out_msg.addr := address;
684 out_msg.Type := CoherenceResponseType:DATA;
685 out_msg.Sender := machineID;
686 out_msg.Destination.add(in_msg.Requestor);
687 out_msg.DataBlk := cache_entry.DataBlk;
688 out_msg.Dirty := cache_entry.Dirty;
689 if (in_msg.DirectedProbe) {
690 out_msg.Acks := machineCount(MachineType:L1Cache);
694 out_msg.SilentAcks := in_msg.SilentAcks;
695 out_msg.MessageSize := MessageSizeType:Response_Data;
696 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
697 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Respond with DATA_SHARED from the cache while retaining ownership
// (O-state style response to a GETS).
702 action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, remaining the owner") {
703 peek(forwardToCache_in, RequestMsg) {
704 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
705 assert(is_valid(cache_entry));
706 out_msg.addr := address;
707 out_msg.Type := CoherenceResponseType:DATA_SHARED;
708 out_msg.Sender := machineID;
709 out_msg.Destination.add(in_msg.Requestor);
710 out_msg.DataBlk := cache_entry.DataBlk;
711 out_msg.Dirty := cache_entry.Dirty;
712 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
713 if (in_msg.DirectedProbe) {
714 out_msg.Acks := machineCount(MachineType:L1Cache);
718 out_msg.SilentAcks := in_msg.SilentAcks;
719 out_msg.MessageSize := MessageSizeType:Response_Data;
720 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
721 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// DATA_SHARED response sourced from the TBE copy (line mid-writeback).
726 action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
727 peek(forwardToCache_in, RequestMsg) {
728 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
729 assert(is_valid(tbe));
730 out_msg.addr := address;
731 out_msg.Type := CoherenceResponseType:DATA_SHARED;
732 out_msg.Sender := machineID;
733 out_msg.Destination.add(in_msg.Requestor);
734 out_msg.DataBlk := tbe.DataBlk;
735 out_msg.Dirty := tbe.Dirty;
736 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
737 if (in_msg.DirectedProbe) {
738 out_msg.Acks := machineCount(MachineType:L1Cache);
742 out_msg.SilentAcks := in_msg.SilentAcks;
743 out_msg.MessageSize := MessageSizeType:Response_Data;
744 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
745 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Service a MERGED_GETS: one DATA_SHARED multicast to every merged
// requestor (Destination set copied wholesale), owner retained.
750 action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
751 peek(forwardToCache_in, RequestMsg) {
752 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
753 assert(is_valid(cache_entry));
754 out_msg.addr := address;
755 out_msg.Type := CoherenceResponseType:DATA_SHARED;
756 out_msg.Sender := machineID;
757 out_msg.Destination := in_msg.MergedRequestors;
758 out_msg.DataBlk := cache_entry.DataBlk;
759 out_msg.Dirty := cache_entry.Dirty;
760 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
761 out_msg.Acks := machineCount(MachineType:L1Cache);
762 out_msg.SilentAcks := in_msg.SilentAcks;
763 out_msg.MessageSize := MessageSizeType:Response_Data;
764 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
765 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// MERGED_GETS response sourced from the TBE copy instead of the cache.
770 action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
771 peek(forwardToCache_in, RequestMsg) {
772 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
773 assert(is_valid(tbe));
774 out_msg.addr := address;
775 out_msg.Type := CoherenceResponseType:DATA_SHARED;
776 out_msg.Sender := machineID;
777 out_msg.Destination := in_msg.MergedRequestors;
778 out_msg.DataBlk := tbe.DataBlk;
779 out_msg.Dirty := tbe.Dirty;
780 DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
781 out_msg.Acks := machineCount(MachineType:L1Cache);
782 out_msg.SilentAcks := in_msg.SilentAcks;
783 out_msg.MessageSize := MessageSizeType:Response_Data;
784 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
785 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Invalidation/probe ack with no data.  Directed probes must be
// answered with data, never a bare ack — hence the assertion.
790 action(f_sendAck, "f", desc="Send ack from cache to requestor") {
791 peek(forwardToCache_in, RequestMsg) {
792 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
793 out_msg.addr := address;
794 out_msg.Type := CoherenceResponseType:ACK;
795 out_msg.Sender := machineID;
796 out_msg.Destination.add(in_msg.Requestor);
798 out_msg.SilentAcks := in_msg.SilentAcks;
799 assert(in_msg.DirectedProbe == false);
800 out_msg.MessageSize := MessageSizeType:Response_Control;
801 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
802 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// ACK_SHARED: like f_sendAck but tells the requestor a shared copy
// remains here (so it cannot assume exclusivity).
807 action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
808 peek(forwardToCache_in, RequestMsg) {
809 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
810 out_msg.addr := address;
811 out_msg.Type := CoherenceResponseType:ACK_SHARED;
812 out_msg.Sender := machineID;
813 out_msg.Destination.add(in_msg.Requestor);
815 out_msg.SilentAcks := in_msg.SilentAcks;
816 assert(in_msg.DirectedProbe == false);
817 out_msg.MessageSize := MessageSizeType:Response_Control;
818 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
819 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Plain UNBLOCK to the directory once a transaction finishes.
824 action(g_sendUnblock, "g", desc="Send unblock to memory") {
825 enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
826 out_msg.addr := address;
827 out_msg.Type := CoherenceResponseType:UNBLOCK;
828 out_msg.Sender := machineID;
829 out_msg.Destination.add(map_Address_to_Directory(address));
830 out_msg.MessageSize := MessageSizeType:Unblock_Control;
// UNBLOCKM: unblock telling the directory we now hold the line in an
// owning (M/O/E-class) state.
834 action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
835 enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
836 out_msg.addr := address;
837 out_msg.Type := CoherenceResponseType:UNBLOCKM;
838 out_msg.Sender := machineID;
839 out_msg.Destination.add(map_Address_to_Directory(address));
840 out_msg.MessageSize := MessageSizeType:Unblock_Control;
// UNBLOCKS: unblock indicating we ended in S; carries the owner we
// observed (tbe.CurOwner) so the directory can update its record.
844 action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
845 enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
846 assert(is_valid(tbe));
847 out_msg.addr := address;
848 out_msg.Type := CoherenceResponseType:UNBLOCKS;
849 out_msg.Sender := machineID;
850 out_msg.CurOwner := tbe.CurOwner;
851 out_msg.Destination.add(map_Address_to_Directory(address));
852 out_msg.MessageSize := MessageSizeType:Unblock_Control;
// Local load hit: bump D-cache MRU and complete the load at the
// Sequencer; testAndClearLocalHit reports L1 vs freshly-moved-from-L2.
856 action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
857 assert(is_valid(cache_entry));
858 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
859 L1Dcache.setMRU(cache_entry);
860 sequencer.readCallback(address, cache_entry.DataBlk, false,
861 testAndClearLocalHit(cache_entry));
// Local ifetch hit: same as h_load_hit but against the I-cache.
864 action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
865 assert(is_valid(cache_entry));
866 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
867 L1Icache.setMRU(cache_entry);
868 sequencer.readCallback(address, cache_entry.DataBlk, false,
869 testAndClearLocalHit(cache_entry));
// Load completed via external messages: credit the responding machine
// and hand the TBE's profiling timestamps to the Sequencer.
872 action(hx_external_load_hit, "hx", desc="load required external msgs") {
873 assert(is_valid(cache_entry));
874 assert(is_valid(tbe));
875 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
876 peek(responseToCache_in, ResponseMsg) {
// MRU updated in both L1s by address.
877 L1Icache.setMRU(address);
878 L1Dcache.setMRU(address);
879 sequencer.readCallback(address, cache_entry.DataBlk, true,
880 machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
881 tbe.ForwardRequestTime, tbe.FirstResponseTime);
// Local store hit: complete at the Sequencer, mark the line dirty, and
// remember ATOMIC accesses (drives the no_mig_atomic GETS policy).
885 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
886 assert(is_valid(cache_entry));
887 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
888 peek(mandatoryQueue_in, RubyRequest) {
889 L1Dcache.setMRU(cache_entry);
890 sequencer.writeCallback(address, cache_entry.DataBlk, false,
891 testAndClearLocalHit(cache_entry));
893 cache_entry.Dirty := true;
894 if (in_msg.Type == RubyRequestType:ATOMIC) {
895 cache_entry.AtomicAccessed := true;
// Complete a flush: data lives in the TBE (the cache entry may already
// have been deallocated by the flush path).
900 action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
901 assert(is_valid(tbe));
902 DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
903 sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
// Complete a store that needed external messages, peeking the response
// that arrived; marks the written line dirty afterwards.
906 action(sx_external_store_hit, "sx", desc="store required external msgs.") {
907 assert(is_valid(cache_entry));
908 assert(is_valid(tbe));
909 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
910 peek(responseToCache_in, ResponseMsg) {
911 L1Icache.setMRU(address);
912 L1Dcache.setMRU(address);
913 sequencer.writeCallback(address, cache_entry.DataBlk, true,
914 machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
915 tbe.ForwardRequestTime, tbe.FirstResponseTime);
917 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
918 cache_entry.Dirty := true;
// Trigger-queue variant of the external store completion: there is no
// response message to peek, so the responder is taken from
// tbe.LastResponder (recorded by m_decrementNumberOfMessages).
921 action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
922 assert(is_valid(cache_entry));
923 assert(is_valid(tbe));
924 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
925 L1Icache.setMRU(address);
926 L1Dcache.setMRU(address);
927 sequencer.writeCallback(address, cache_entry.DataBlk, true,
928 machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
929 tbe.ForwardRequestTime, tbe.FirstResponseTime);
931 cache_entry.Dirty := true;
// TBE allocation. i_allocateTBE requires a valid cache entry and copies
// its data/dirty bits into the TBE (data only used for writebacks);
// it_allocateTBE is the variant used when no cache entry is involved.
934 action(i_allocateTBE, "i", desc="Allocate TBE") {
935 check_allocate(TBEs);
936 assert(is_valid(cache_entry));
937 TBEs.allocate(address);
938 set_tbe(TBEs[address]);
939 tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
940 tbe.Dirty := cache_entry.Dirty;
941 tbe.Sharers := false;
944 action(it_allocateTBE, "it", desc="Allocate TBE") {
945 check_allocate(TBEs);
946 TBEs.allocate(address);
947 set_tbe(TBEs[address]);
949 tbe.Sharers := false;
// Queue-pop helpers: consume the head message at the current clock edge.
952 action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
953 triggerQueue_in.dequeue(clockEdge());
956 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
957 mandatoryQueue_in.dequeue(clockEdge());
// Consume the head of the forwarded-request queue at the current clock edge.
// (Fixed typo in desc: "forwareded" -> "forwarded".)
960 action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
961 forwardToCache_in.dequeue(clockEdge());
// Copy buffered data/dirty bits from the TBE back into a cache entry.
// The L1 variant additionally sets FromL2, flagging that this L1 entry
// was filled from the local L2.
964 action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
965 assert(is_valid(cache_entry));
966 assert(is_valid(tbe));
967 cache_entry.Dirty := tbe.Dirty;
968 cache_entry.DataBlk := tbe.DataBlk;
971 action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
972 assert(is_valid(cache_entry));
973 assert(is_valid(tbe));
974 cache_entry.Dirty := tbe.Dirty;
975 cache_entry.DataBlk := tbe.DataBlk;
976 cache_entry.FromL2 := true;
// Account for an incoming response: subtract its Acks (and, once per
// transaction, its SilentAcks) from the pending-message count, remember
// the responder, and reconcile the request timestamps carried by the
// message with those already in the TBE.
979 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
980 peek(responseToCache_in, ResponseMsg) {
981 assert(in_msg.Acks >= 0);
982 assert(is_valid(tbe));
983 DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
984 DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
// Silent acks are applied exactly once, no matter how many responses
// advertise them.
985 if (tbe.AppliedSilentAcks == false) {
986 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
987 tbe.AppliedSilentAcks := true;
989 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
990 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
991 DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
992 APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
993 APPEND_TRANSITION_COMMENT(in_msg.Sender);
994 tbe.LastResponder := in_msg.Sender;
// Timestamp reconciliation: when both the TBE and the message carry a
// non-zero time they must agree; otherwise adopt the message's value.
995 if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
996 assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
998 if (in_msg.InitialRequestTime != zero_time()) {
999 tbe.InitialRequestTime := in_msg.InitialRequestTime;
1001 if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
1002 assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
1004 if (in_msg.ForwardRequestTime != zero_time()) {
1005 tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
// Remember when the first response for this transaction arrived.
1007 if (tbe.FirstResponseTime == zero_time()) {
1008 tbe.FirstResponseTime := curCycle();
// Track the current owner seen in a response (used later by the UNBLOCKS
// message), pop the response queue, and schedule an L2-to-L1 transfer
// trigger after l2_cache_hit_latency cycles.
1012 action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
1013 peek(responseToCache_in, ResponseMsg) {
1014 assert(is_valid(tbe));
1015 tbe.CurOwner := in_msg.Sender;
1019 action(n_popResponseQueue, "n", desc="Pop response queue") {
1020 responseToCache_in.dequeue(clockEdge());
1023 action(ll_L2toL1Transfer, "ll", desc="") {
1024 enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
1025 out_msg.addr := address;
1026 out_msg.Type := TriggerType:L2_to_L1;
// When all expected responses have arrived, fire a completion trigger.
// The two Type assignments below are the arms of a conditional whose
// guard is not visible in this view -- presumably ALL_ACKS when sharers
// were observed vs. ALL_ACKS_NO_SHARERS otherwise; confirm.
1030 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
1031 assert(is_valid(tbe));
1032 if (tbe.NumPendingMsgs == 0) {
1033 enqueue(triggerQueue_out, TriggerMsg) {
1034 out_msg.addr := address;
1036 out_msg.Type := TriggerType:ALL_ACKS;
1038 out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
// Manual adjustments of the pending-response counter.
1044 action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
1045 assert(is_valid(tbe));
1046 tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
1049 action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
1050 assert(is_valid(tbe));
1051 tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
// Respond to a forwarded request with DATA taken from the TBE.
1054 action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
1055 peek(forwardToCache_in, RequestMsg) {
1056 assert(in_msg.Requestor != machineID);
1057 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
1058 assert(is_valid(tbe));
1059 out_msg.addr := address;
1060 out_msg.Type := CoherenceResponseType:DATA;
1061 out_msg.Sender := machineID;
1062 out_msg.Destination.add(in_msg.Requestor);
1063 DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
1064 out_msg.DataBlk := tbe.DataBlk;
1065 out_msg.Dirty := tbe.Dirty;
// For a directed probe this response carries acks for the full L1
// population; the non-directed case is handled in lines not visible
// in this view -- confirm against the full protocol source.
1066 if (in_msg.DirectedProbe) {
1067 out_msg.Acks := machineCount(MachineType:L1Cache);
1071 out_msg.SilentAcks := in_msg.SilentAcks;
1072 out_msg.MessageSize := MessageSizeType:Response_Data;
1073 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1074 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Same as q_sendDataFromTBEToCache but replies with DATA_SHARED: this
// node remains the owner after supplying the data.
1079 action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
1080 peek(forwardToCache_in, RequestMsg) {
1081 assert(in_msg.Requestor != machineID);
1082 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
1083 assert(is_valid(tbe));
1084 out_msg.addr := address;
1085 out_msg.Type := CoherenceResponseType:DATA_SHARED;
1086 out_msg.Sender := machineID;
1087 out_msg.Destination.add(in_msg.Requestor);
1088 DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
1089 out_msg.DataBlk := tbe.DataBlk;
1090 out_msg.Dirty := tbe.Dirty;
1091 if (in_msg.DirectedProbe) {
1092 out_msg.Acks := machineCount(MachineType:L1Cache);
1096 out_msg.SilentAcks := in_msg.SilentAcks;
1097 out_msg.MessageSize := MessageSizeType:Response_Data;
1098 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1099 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Reply to a merged (multi-requestor) GETS: the destination is the whole
// merged requestor set and the ack count covers the full L1 population.
1104 action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
1105 peek(forwardToCache_in, RequestMsg) {
1106 enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
1107 assert(is_valid(tbe));
1108 out_msg.addr := address;
1109 out_msg.Type := CoherenceResponseType:DATA_SHARED;
1110 out_msg.Sender := machineID;
1111 out_msg.Destination := in_msg.MergedRequestors;
1112 DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
1113 out_msg.DataBlk := tbe.DataBlk;
1114 out_msg.Dirty := tbe.Dirty;
1115 out_msg.Acks := machineCount(MachineType:L1Cache);
1116 out_msg.SilentAcks := in_msg.SilentAcks;
1117 out_msg.MessageSize := MessageSizeType:Response_Data;
1118 out_msg.InitialRequestTime := in_msg.InitialRequestTime;
1119 out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
// Write back TBE data to the directory. The WB_DIRTY / WB_CLEAN
// assignments are the two arms of a conditional (presumably on
// tbe.Dirty) whose guard lines are not visible in this view.
1124 action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
1125 enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
1126 assert(is_valid(tbe));
1127 out_msg.addr := address;
1128 out_msg.Sender := machineID;
1129 out_msg.Destination.add(map_Address_to_Directory(address));
1130 out_msg.Dirty := tbe.Dirty;
1132 out_msg.Type := CoherenceResponseType:WB_DIRTY;
1133 out_msg.DataBlk := tbe.DataBlk;
1134 out_msg.MessageSize := MessageSizeType:Writeback_Data;
1136 out_msg.Type := CoherenceResponseType:WB_CLEAN;
1137 // NOTE: in a real system this would not send data. We send
1138 // data here only so we can check it at the memory
1139 out_msg.DataBlk := tbe.DataBlk;
1140 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Record that other sharers responded during this transaction.
1145 action(r_setSharerBit, "r", desc="We saw other sharers") {
1146 assert(is_valid(tbe));
1147 tbe.Sharers := true;
1150 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
1151 TBEs.deallocate(address);
// Exclusive writeback of TBE data to the directory; like
// qq_sendDataFromTBEToMemory, the WB_EXCLUSIVE_DIRTY / _CLEAN arms
// belong to a conditional whose guard lines are not visible here.
1155 action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
1156 enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
1157 assert(is_valid(tbe));
1158 out_msg.addr := address;
1159 out_msg.Sender := machineID;
1160 out_msg.Destination.add(map_Address_to_Directory(address));
1161 out_msg.DataBlk := tbe.DataBlk;
1162 out_msg.Dirty := tbe.Dirty;
1164 out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
1165 out_msg.DataBlk := tbe.DataBlk;
1166 out_msg.MessageSize := MessageSizeType:Writeback_Data;
1168 out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
1169 // NOTE: in a real system this would not send data. We send
1170 // data here only so we can check it at the memory
1171 out_msg.DataBlk := tbe.DataBlk;
1172 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Data-installation actions. The plain variants overwrite the cache
// entry / TBE from the response; the "Verify" variants additionally
// assert the incoming data matches what is already held (used when we
// expect a redundant copy) and OR the dirty bits together.
1177 action(u_writeDataToCache, "u", desc="Write data to cache") {
1178 peek(responseToCache_in, ResponseMsg) {
1179 assert(is_valid(cache_entry));
1180 cache_entry.DataBlk := in_msg.DataBlk;
1181 cache_entry.Dirty := in_msg.Dirty;
1185 action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
1186 peek(responseToCache_in, ResponseMsg) {
1187 assert(is_valid(tbe));
1188 tbe.DataBlk := in_msg.DataBlk;
1189 tbe.Dirty := in_msg.Dirty;
1193 action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
1194 peek(responseToCache_in, ResponseMsg) {
1195 assert(is_valid(cache_entry));
1196 DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
1197 cache_entry.DataBlk, in_msg.DataBlk);
1198 assert(cache_entry.DataBlk == in_msg.DataBlk);
1199 cache_entry.DataBlk := in_msg.DataBlk;
1200 cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
1204 action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
1205 peek(responseToCache_in, ResponseMsg) {
1206 assert(is_valid(tbe));
1207 DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
1208 tbe.DataBlk, in_msg.DataBlk);
1209 assert(tbe.DataBlk == in_msg.DataBlk);
1210 tbe.DataBlk := in_msg.DataBlk;
1211 tbe.Dirty := in_msg.Dirty || tbe.Dirty;
// Cache-entry allocation/deallocation helpers. gg_ removes the block
// from whichever L1 holds it (the I-cache branch's guard line is not
// visible in this view); ii_/jj_ allocate only when no entry exists;
// vv_/rr_ manage the L2 entry.
1215 action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
1216 if (L1Dcache.isTagPresent(address)) {
1217 L1Dcache.deallocate(address);
1219 L1Icache.deallocate(address);
1221 unset_cache_entry();
1224 action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
1225 if (is_invalid(cache_entry)) {
1226 set_cache_entry(L1Dcache.allocate(address, new Entry));
1230 action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
1231 if (is_invalid(cache_entry)) {
1232 set_cache_entry(L1Icache.allocate(address, new Entry));
1236 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
1237 set_cache_entry(L2cache.allocate(address, new Entry));
1240 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
1241 L2cache.deallocate(address);
1242 unset_cache_entry();
// Deallocate the block wherever it resides: L1D first, then L1I, and
// finally (asserting presence) the L2.
1245 action(gr_deallocateCacheBlock, "\gr", desc="Deallocate an L1 or L2 cache block.") {
1246 if (L1Dcache.isTagPresent(address)) {
1247 L1Dcache.deallocate(address);
1249 else if (L1Icache.isTagPresent(address)){
1250 L1Icache.deallocate(address);
1253 assert(L2cache.isTagPresent(address));
1254 L2cache.deallocate(address);
1256 unset_cache_entry();
// Eviction notification (gated by the send_evictions parameter),
// demand hit/miss profiling counters for each cache, and the stall /
// no-op / wake-up actions used throughout the transition table.
1259 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
1260 if (send_evictions) {
1261 DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
1262 sequencer.evictionCallback(address);
1266 action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
1267 ++L1Dcache.demand_misses;
1270 action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
1271 ++L1Dcache.demand_hits;
1274 action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
1275 ++L1Icache.demand_misses;
1278 action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
1279 ++L1Icache.demand_hits;
1282 action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
1283 ++L2cache.demand_misses;
1286 action(uu_profileL2Hit, "\uh", desc="Profile the demand hits ") {
1287 ++L2cache.demand_hits;
1290 action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
1291 stall_and_wait(mandatoryQueue_in, address);
1294 action(z_stall, "z", desc="stall") {
1295 // do nothing and the special z_stall action will return a protocol stall
1296 // so that the next port is checked
1299 action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
1300 wakeUpBuffers(address);
1303 action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
1307 //*****************************************************
1309 //*****************************************************
1311 // Transitions for Load/Store/L2_Replacement from transient states
// Requests that arrive while a transaction is in flight are recycled to
// the back of the mandatory queue until the transient state resolves.
1312 transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
1313 zz_stallAndWaitMandatoryQueue;
1316 transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
1317 zz_stallAndWaitMandatoryQueue;
1320 transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
1321 zz_stallAndWaitMandatoryQueue;
1324 transition({IM, IS, OI, MI, II, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
1325 zz_stallAndWaitMandatoryQueue;
1328 transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
1329 zz_stallAndWaitMandatoryQueue;
1332 transition({MI_F, MM_F}, {Store}) {
1333 zz_stallAndWaitMandatoryQueue;
1336 transition({MM_F, MI_F}, {Flush_line}) {
1337 zz_stallAndWaitMandatoryQueue;
// External requests observed mid-transfer (the action lines of these
// two transitions are not visible in this view).
1340 transition({ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
1344 transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
1348 // Transitions moving data between the L1 and L2 caches
1349 transition({S, O, M, MM}, L1_to_L2) {
1351 gg_deallocateL1CacheBlock;
1352 vv_allocateL2CacheBlock;
// L2 -> L1 transfers: deallocate the L2 copy, allocate in the target L1
// (D- or I-cache per the trigger), and stall the requester meanwhile.
1357 transition(S, Trigger_L2_to_L1D, ST) {
1359 rr_deallocateL2CacheBlock;
1360 ii_allocateL1DCacheBlock;
1363 zz_stallAndWaitMandatoryQueue;
1367 transition(O, Trigger_L2_to_L1D, OT) {
1369 rr_deallocateL2CacheBlock;
1370 ii_allocateL1DCacheBlock;
1373 zz_stallAndWaitMandatoryQueue;
1377 transition(M, Trigger_L2_to_L1D, MT) {
1379 rr_deallocateL2CacheBlock;
1380 ii_allocateL1DCacheBlock;
1383 zz_stallAndWaitMandatoryQueue;
1387 transition(MM, Trigger_L2_to_L1D, MMT) {
1389 rr_deallocateL2CacheBlock;
1390 ii_allocateL1DCacheBlock;
1393 zz_stallAndWaitMandatoryQueue;
1397 transition(S, Trigger_L2_to_L1I, ST) {
1399 rr_deallocateL2CacheBlock;
1400 jj_allocateL1ICacheBlock;
1403 zz_stallAndWaitMandatoryQueue;
1407 transition(O, Trigger_L2_to_L1I, OT) {
1409 rr_deallocateL2CacheBlock;
1410 jj_allocateL1ICacheBlock;
1413 zz_stallAndWaitMandatoryQueue;
1417 transition(M, Trigger_L2_to_L1I, MT) {
1419 rr_deallocateL2CacheBlock;
1420 jj_allocateL1ICacheBlock;
1423 zz_stallAndWaitMandatoryQueue;
1427 transition(MM, Trigger_L2_to_L1I, MMT) {
1429 rr_deallocateL2CacheBlock;
1430 jj_allocateL1ICacheBlock;
1433 zz_stallAndWaitMandatoryQueue;
// Transfer completion: wake requests that were stalled on this address.
1437 transition(ST, Complete_L2_to_L1, SR) {
1439 kd_wakeUpDependents;
1442 transition(OT, Complete_L2_to_L1, OR) {
1444 kd_wakeUpDependents;
1447 transition(MT, Complete_L2_to_L1, MR) {
1449 kd_wakeUpDependents;
1452 transition(MMT, Complete_L2_to_L1, MMR) {
1454 kd_wakeUpDependents;
1457 // Transitions from Idle
// Misses from I allocate an L1 entry and a TBE (allocation/request
// action lines are partially hidden in this view).
1458 transition({I,IR}, Load, IS) {
1459 ii_allocateL1DCacheBlock;
1462 uu_profileL1DataMiss;
1464 k_popMandatoryQueue;
1467 transition({I,IR}, Ifetch, IS) {
1468 jj_allocateL1ICacheBlock;
1471 uu_profileL1InstMiss;
1473 k_popMandatoryQueue;
1476 transition({I,IR}, Store, IM) {
1477 ii_allocateL1DCacheBlock;
1480 uu_profileL1DataMiss;
1482 k_popMandatoryQueue;
1485 transition({I, IR}, Flush_line, IM_F) {
1488 k_popMandatoryQueue;
1491 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1496 // Transitions from Shared
1497 transition({S, SM, ISM}, Load) {
1499 uu_profileL1DataHit;
1500 k_popMandatoryQueue;
1503 transition({S, SM, ISM}, Ifetch) {
1505 uu_profileL1InstHit;
1506 k_popMandatoryQueue;
// SR/OR/MR/MMR are "recently transferred from L2" states: a hit there
// returns to the stable state and wakes all dependents.
1509 transition(SR, Load, S) {
1511 uu_profileL1DataMiss;
1513 k_popMandatoryQueue;
1514 ka_wakeUpAllDependents;
1517 transition(SR, Ifetch, S) {
1519 uu_profileL1InstMiss;
1521 k_popMandatoryQueue;
1522 ka_wakeUpAllDependents;
1525 transition({S,SR}, Store, SM) {
1528 uu_profileL1DataMiss;
1530 k_popMandatoryQueue;
1533 transition({S, SR}, Flush_line, SM_F) {
1536 forward_eviction_to_cpu;
1537 gg_deallocateL1CacheBlock;
1538 k_popMandatoryQueue;
1541 transition(S, L2_Replacement, I) {
1542 forward_eviction_to_cpu;
1543 rr_deallocateL2CacheBlock;
1544 ka_wakeUpAllDependents;
1547 transition(S, {Other_GETX, Invalidate}, I) {
1549 forward_eviction_to_cpu;
1550 gr_deallocateCacheBlock;
1554 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1559 // Transitions from Owned
1560 transition({O, OM, SS, MM_W, M_W}, {Load}) {
1562 uu_profileL1DataHit;
1563 k_popMandatoryQueue;
1566 transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
1568 uu_profileL1InstHit;
1569 k_popMandatoryQueue;
1572 transition(OR, Load, O) {
1574 uu_profileL1DataMiss;
1576 k_popMandatoryQueue;
1577 ka_wakeUpAllDependents;
1580 transition(OR, Ifetch, O) {
1582 uu_profileL1InstMiss;
1584 k_popMandatoryQueue;
1585 ka_wakeUpAllDependents;
// Store from Owned: we already hold the data, so pre-decrement the
// expected-response count by one before issuing the upgrade.
1588 transition({O,OR}, Store, OM) {
1591 p_decrementNumberOfMessagesByOne;
1592 uu_profileL1DataMiss;
1594 k_popMandatoryQueue;
1597 transition({O, OR}, Flush_line, OM_F) {
1600 p_decrementNumberOfMessagesByOne;
1601 forward_eviction_to_cpu;
1602 gg_deallocateL1CacheBlock;
1603 k_popMandatoryQueue;
1606 transition(O, L2_Replacement, OI) {
1609 forward_eviction_to_cpu;
1610 rr_deallocateL2CacheBlock;
1611 ka_wakeUpAllDependents;
1614 transition(O, {Other_GETX, Invalidate}, I) {
1616 forward_eviction_to_cpu;
1617 gr_deallocateCacheBlock;
1621 transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1626 transition(O, Merged_GETS) {
1627 em_sendDataSharedMultiple;
1631 // Transitions from Modified
1632 transition({MM, M}, {Ifetch}) {
1634 uu_profileL1InstHit;
1635 k_popMandatoryQueue;
1638 transition({MM, M}, {Load}) {
1640 uu_profileL1DataHit;
1641 k_popMandatoryQueue;
1644 transition(MM, Store) {
1646 uu_profileL1DataHit;
1647 k_popMandatoryQueue;
1650 transition(MMR, Load, MM) {
1652 uu_profileL1DataMiss;
1654 k_popMandatoryQueue;
1655 ka_wakeUpAllDependents;
1658 transition(MMR, Ifetch, MM) {
1660 uu_profileL1InstMiss;
1662 k_popMandatoryQueue;
1663 ka_wakeUpAllDependents;
1666 transition(MMR, Store, MM) {
1668 uu_profileL1DataMiss;
1670 k_popMandatoryQueue;
1671 ka_wakeUpAllDependents;
1674 transition({MM, M, MMR, MR}, Flush_line, MM_F) {
1677 p_decrementNumberOfMessagesByOne;
1678 forward_eviction_to_cpu;
1679 gg_deallocateL1CacheBlock;
1680 k_popMandatoryQueue;
1683 transition(MM_F, Block_Ack, MI_F) {
1686 kd_wakeUpDependents;
1689 transition(MM, L2_Replacement, MI) {
1692 forward_eviction_to_cpu;
1693 rr_deallocateL2CacheBlock;
1694 ka_wakeUpAllDependents;
// Losing exclusive ownership: supply the data, notify the CPU of the
// eviction, and drop the block.
1697 transition(MM, {Other_GETX, Invalidate}, I) {
1698 c_sendExclusiveData;
1699 forward_eviction_to_cpu;
1700 gr_deallocateCacheBlock;
1704 transition(MM, Other_GETS, I) {
1705 c_sendExclusiveData;
1706 forward_eviction_to_cpu;
1707 gr_deallocateCacheBlock;
1711 transition(MM, NC_DMA_GETS, O) {
1716 transition(MM, Other_GETS_No_Mig, O) {
1721 transition(MM, Merged_GETS, O) {
1722 em_sendDataSharedMultiple;
1726 // Transitions from Dirty Exclusive
1727 transition(M, Store, MM) {
1729 uu_profileL1DataHit;
1730 k_popMandatoryQueue;
1733 transition(MR, Load, M) {
1735 uu_profileL1DataMiss;
1737 k_popMandatoryQueue;
1738 ka_wakeUpAllDependents;
1741 transition(MR, Ifetch, M) {
1743 uu_profileL1InstMiss;
1745 k_popMandatoryQueue;
1746 ka_wakeUpAllDependents;
1749 transition(MR, Store, MM) {
1751 uu_profileL1DataMiss;
1753 k_popMandatoryQueue;
1754 ka_wakeUpAllDependents;
1757 transition(M, L2_Replacement, MI) {
1760 forward_eviction_to_cpu;
1761 rr_deallocateL2CacheBlock;
1762 ka_wakeUpAllDependents;
1765 transition(M, {Other_GETX, Invalidate}, I) {
1766 c_sendExclusiveData;
1767 forward_eviction_to_cpu;
1768 gr_deallocateCacheBlock;
1772 transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
1777 transition(M, NC_DMA_GETS, O) {
1782 transition(M, Merged_GETS, O) {
1783 em_sendDataSharedMultiple;
1787 // Transitions from IM
1789 transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
// Ack accounting while waiting for a store miss to complete.
1794 transition({IM, IM_F, MM_F}, Ack) {
1795 m_decrementNumberOfMessages;
1796 o_checkForCompletion;
1800 transition(IM, Data, ISM) {
1802 m_decrementNumberOfMessages;
1803 o_checkForCompletion;
1807 transition(IM_F, Data, ISM_F) {
1808 uf_writeDataToCacheTBE;
1809 m_decrementNumberOfMessages;
1810 o_checkForCompletion;
1814 transition(IM, Exclusive_Data, MM_W) {
1816 m_decrementNumberOfMessages;
1817 o_checkForCompletion;
1818 sx_external_store_hit;
1820 kd_wakeUpDependents;
1823 transition(IM_F, Exclusive_Data, MM_WF) {
1824 uf_writeDataToCacheTBE;
1825 m_decrementNumberOfMessages;
1826 o_checkForCompletion;
1830 // Transitions from SM
1831 transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
// A conflicting exclusive request while upgrading forces a restart
// from IM/IM_F.
1836 transition(SM, {Other_GETX, Invalidate}, IM) {
1838 forward_eviction_to_cpu;
1842 transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
1844 forward_eviction_to_cpu;
1848 transition({SM, SM_F}, Ack) {
1849 m_decrementNumberOfMessages;
1850 o_checkForCompletion;
// Data arriving in SM should match the copy we already hold, hence the
// verify variants of the data-write actions.
1854 transition(SM, {Data, Exclusive_Data}, ISM) {
1855 v_writeDataToCacheVerify;
1856 m_decrementNumberOfMessages;
1857 o_checkForCompletion;
1861 transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
1862 vt_writeDataToTBEVerify;
1863 m_decrementNumberOfMessages;
1864 o_checkForCompletion;
1868 // Transitions from ISM
1869 transition({ISM, ISM_F}, Ack) {
1870 m_decrementNumberOfMessages;
1871 o_checkForCompletion;
1875 transition(ISM, All_acks_no_sharers, MM) {
1876 sxt_trig_ext_store_hit;
1880 kd_wakeUpDependents;
1883 transition(ISM_F, All_acks_no_sharers, MI_F) {
1886 kd_wakeUpDependents;
1889 // Transitions from OM
// Ownership stolen mid-upgrade: expect one more message (the data we
// must now re-request) and fall back to IM.
1891 transition(OM, {Other_GETX, Invalidate}, IM) {
1893 pp_incrementNumberOfMessagesByOne;
1894 forward_eviction_to_cpu;
1898 transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
1899 q_sendDataFromTBEToCache;
1900 pp_incrementNumberOfMessagesByOne;
1901 forward_eviction_to_cpu;
1905 transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1910 transition(OM, Merged_GETS) {
1911 em_sendDataSharedMultiple;
1915 transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1916 et_sendDataSharedFromTBE;
1920 transition(OM_F, Merged_GETS) {
1921 emt_sendDataSharedMultipleFromTBE;
1925 transition({OM, OM_F}, Ack) {
1926 m_decrementNumberOfMessages;
1927 o_checkForCompletion;
1931 transition(OM, {All_acks, All_acks_no_sharers}, MM) {
1932 sxt_trig_ext_store_hit;
1936 kd_wakeUpDependents;
1939 transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
1942 kd_wakeUpDependents;
1944 // Transitions from IS
1946 transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1951 transition(IS, Ack) {
1952 m_decrementNumberOfMessages;
1953 o_checkForCompletion;
1957 transition(IS, Shared_Ack) {
1958 m_decrementNumberOfMessages;
1960 o_checkForCompletion;
1964 transition(IS, Data, SS) {
1966 m_decrementNumberOfMessages;
1967 o_checkForCompletion;
1968 hx_external_load_hit;
1969 uo_updateCurrentOwner;
1971 kd_wakeUpDependents;
1974 transition(IS, Exclusive_Data, M_W) {
1976 m_decrementNumberOfMessages;
1977 o_checkForCompletion;
1978 hx_external_load_hit;
1980 kd_wakeUpDependents;
1983 transition(IS, Shared_Data, SS) {
1986 m_decrementNumberOfMessages;
1987 o_checkForCompletion;
1988 hx_external_load_hit;
1989 uo_updateCurrentOwner;
1991 kd_wakeUpDependents;
1994 // Transitions from SS
1996 transition(SS, Ack) {
1997 m_decrementNumberOfMessages;
1998 o_checkForCompletion;
2002 transition(SS, Shared_Ack) {
2003 m_decrementNumberOfMessages;
2005 o_checkForCompletion;
2009 transition(SS, All_acks, S) {
2013 kd_wakeUpDependents;
2016 transition(SS, All_acks_no_sharers, S) {
2017 // Note: The directory might still be the owner, so that is why we go to S
2021 kd_wakeUpDependents;
2024 // Transitions from MM_W
2026 transition(MM_W, Store) {
2028 uu_profileL1DataHit;
2029 k_popMandatoryQueue;
2032 transition({MM_W, MM_WF}, Ack) {
2033 m_decrementNumberOfMessages;
2034 o_checkForCompletion;
2038 transition(MM_W, All_acks_no_sharers, MM) {
2042 kd_wakeUpDependents;
2045 transition(MM_WF, All_acks_no_sharers, MI_F) {
2048 kd_wakeUpDependents;
2050 // Transitions from M_W
2052 transition(M_W, Store, MM_W) {
2054 uu_profileL1DataHit;
2055 k_popMandatoryQueue;
2058 transition(M_W, Ack) {
2059 m_decrementNumberOfMessages;
2060 o_checkForCompletion;
2064 transition(M_W, All_acks_no_sharers, M) {
2068 kd_wakeUpDependents;
2071 // Transitions from OI/MI
// While a writeback is pending, forwarded requests are satisfied out of
// the TBE copy of the data.
2073 transition({OI, MI}, {Other_GETX, Invalidate}, II) {
2074 q_sendDataFromTBEToCache;
2078 transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
2079 sq_sendSharedDataFromTBEToCache;
2083 transition({OI, MI}, Merged_GETS, OI) {
2084 qm_sendDataFromTBEToCache;
2088 transition(MI, Writeback_Ack, I) {
2089 t_sendExclusiveDataFromTBEToMemory;
2092 kd_wakeUpDependents;
2095 transition(MI_F, Writeback_Ack, I) {
2097 t_sendExclusiveDataFromTBEToMemory;
2100 kd_wakeUpDependents;
2103 transition(OI, Writeback_Ack, I) {
2104 qq_sendDataFromTBEToMemory;
2107 kd_wakeUpDependents;
2110 // Transitions from II
2111 transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
2116 transition(II, Writeback_Ack, I) {
2120 kd_wakeUpDependents;
2123 transition(II, Writeback_Nack, I) {
2126 kd_wakeUpDependents;
// MM_F: flush in progress; forwarded requests are served from the TBE.
2129 transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
2130 ct_sendExclusiveDataFromTBE;
2131 pp_incrementNumberOfMessagesByOne;
2135 transition(MM_F, Other_GETS, IM_F) {
2136 ct_sendExclusiveDataFromTBE;
2137 pp_incrementNumberOfMessagesByOne;
2141 transition(MM_F, NC_DMA_GETS, OM_F) {
2142 sq_sendSharedDataFromTBEToCache;
2146 transition(MM_F, Other_GETS_No_Mig, OM_F) {
2147 et_sendDataSharedFromTBE;
2151 transition(MM_F, Merged_GETS, OM_F) {
2152 emt_sendDataSharedMultipleFromTBE;