mem: Fix guest corruption when caches handle uncacheable accesses
src/mem/protocol/MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

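// This controller models one node's private hierarchy in an AMD
// Hammer-like broadcast protocol: split L1 I/D caches plus an exclusive
// private L2, all driven by a single state machine.
//
// Configuration parameters:
//   cache_response_latency - latency to source a response from this cache
//   issue_latency          - latency to issue a request to the directory
//   l2_cache_hit_latency   - latency of an internal L2-to-L1 transfer
//   no_mig_atomic          - if true, atomically-accessed blocks answer
//                            Other_GETS without migrating ownership
//   send_evictions         - notify the CPU model on evictions (e.g., so
//                            the core can track LL/SC addresses)
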
machine(L1Cache, "AMD Hammer-like protocol")
  : Sequencer * sequencer,
    CacheMemory * L1IcacheMemory,
    CacheMemory * L1DcacheMemory,
    CacheMemory * L2cacheMemory,
    int cache_response_latency = 10,
    int issue_latency = 2,
    int l2_cache_hit_latency = 10,
    bool no_mig_atomic = true,
    bool send_evictions
{

  // NETWORK BUFFERS
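  // Each message class travels on its own virtual network so that
  // responses are never blocked behind requests (deadlock avoidance):
  // requests on vnet 2, forwards on 3, responses on 4, unblocks on 5.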
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";

  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient states
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient states related to flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }
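
  // Naming conventions: the *R states are base states that have just
  // received a block from the L2 and stay "locked" until the stalled
  // mandatory-queue request that triggered the transfer replays; the *T
  // states cover the in-flight L2-to-L1 transfer itself.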

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
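    // These arrive on the forward network: the directory broadcasts (or
    // direct-probes) each external request, and every cache must respond
    // so the requestor can count acks.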
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor to a block that was accessed atomically; ownership does not migrate";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);

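  // The private hierarchy is exclusive: a block lives in at most one of
  // L1I, L1D, or L2 at a time (asserted in setState), so the lookups
  // below simply probe each cache in turn.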
  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return cache_entry.DataBlk;
    }

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    }

    error("Missing data block");
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if ((type == RubyRequestType:FLUSH)) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

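  // FromL2 marks a block that just migrated up from the L2; the first
  // hit after the move is reported as an L2 hit for profiling, and the
  // flag is cleared so later hits count as L1 hits.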
  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if ((in_msg.Type == CoherenceRequestType:GETX) || (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX only if more than one cache exists") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := get_time();
      }
    }
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

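  // Ack accounting in the responses below: a requestor expects
  // machineCount responses in total. A normal data response carries
  // Acks := 2, apparently standing in for both this cache and the memory
  // response that the directory suppresses when a cache owns the block;
  // a response to a directed probe stands in for every node and carries
  // Acks := machineCount. SilentAcks relays how many nodes the directory
  // already knows will not respond.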
  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, remaining the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, GenericMachineType:L1Cache, tbe.DataBlk);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

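  // With a full-bit directory, caches known to be non-sharers stay
  // silent; the directory instead forwards their count as SilentAcks,
  // which is subtracted exactly once (guarded by AppliedSilentAcks).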
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving to state SS, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

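  // Completion is detected here and converted into an internal trigger
  // message, so All_acks fires as its own event rather than inside the
  // response-handling transition.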
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

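  // The *Verify variants are used when this cache already holds valid
  // data and the incoming response should merely confirm it; the assert
  // catches data-divergence bugs.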
  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
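  // An L1-to-L2 move stages the block in a TBE so the L1 entry can be
  // freed before the L2 entry is allocated and filled.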
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }

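  // L2-to-L1 transfers: stage the block in a TBE, free the L2 entry,
  // allocate the L1 entry, and stall the requesting access; the timed
  // trigger (l2_cache_hit_latency) later fires Complete_L2_to_L1.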
  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

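  // Completion moves to the corresponding locked *R state and wakes the
  // stalled request, which then replays and hits in the L1.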
  transition(IT, Complete_L2_to_L1, IR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, SR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, OR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, MR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MMR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from Idle
  transition({I, IR}, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({I, IR}, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({I, IR}, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({I, IR}, Flush_line, IM_F) {
    it_allocateTBE;
    bf_issueGETF;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

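  // Even an Invalid cache must ack every broadcast request: the
  // requestor completes only after counting a response from every node.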
1481 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1482 f_sendAck;
1483 l_popForwardQueue;
1484 }
1485
1486 // Transitions from Shared
1487 transition({S, SM, ISM}, {Load, Ifetch}) {
1488 h_load_hit;
1489 k_popMandatoryQueue;
1490 }
1491
1492 transition(SR, {Load, Ifetch}, S) {
1493 h_load_hit;
1494 k_popMandatoryQueue;
1495 ka_wakeUpAllDependents;
1496 }
1497
1498 transition({S, SR}, Store, SM) {
1499 i_allocateTBE;
1500 b_issueGETX;
1501 uu_profileMiss;
1502 k_popMandatoryQueue;
1503 }
1504
1505 transition({S, SR}, Flush_line, SM_F) {
1506 i_allocateTBE;
1507 bf_issueGETF;
1508 uu_profileMiss;
1509 forward_eviction_to_cpu;
1510 gg_deallocateL1CacheBlock;
1511 k_popMandatoryQueue;
1512 }
1513
1514 transition(S, L2_Replacement, I) {
1515 forward_eviction_to_cpu;
1516 rr_deallocateL2CacheBlock;
1517 ka_wakeUpAllDependents;
1518 }
1519
1520 transition(S, {Other_GETX, Invalidate}, I) {
1521 f_sendAck;
1522 forward_eviction_to_cpu;
1523 l_popForwardQueue;
1524 }
1525
1526 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1527 ff_sendAckShared;
1528 l_popForwardQueue;
1529 }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(OR, {Load, Ifetch}, O) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({O, OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
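
  // As owner, this cache (not memory) supplies data for foreign reads;
  // p_decrementNumberOfMessagesByOne apparently adjusts the expected
  // response count since the upgrading owner already holds valid data,
  // and Merged_GETS answers several coalesced readers at once.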

  // Transitions from Modified
  transition({MM, M}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MMR, {Load, Ifetch}, MM) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
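
  // Migratory-sharing optimization: a plain Other_GETS in MM hands the
  // block over exclusively (c_sendExclusiveData, dropping to I) on the
  // assumption the reader will write it next, while Other_GETS_No_Mig
  // (e.g. when no_mig_atomic suppresses migration) shares the data and
  // keeps an owned copy.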

  // Transitions from Dirty Exclusive
  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MR, {Load, Ifetch}, M) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
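
  // Unlike MM, plain M never migrates on a foreign GETS: the block is
  // dirty but has not been written by the local CPU, so readers get a
  // shared copy and the block downgrades to O. A local store silently
  // upgrades M to MM with no new traffic.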

  // Transitions from IM

  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }
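
  // In IM the writer is collecting one response per node. Plain Data
  // moves it to ISM to keep counting invalidation acks; Exclusive_Data
  // means no sharers existed, so the store can complete immediately
  // (sx_external_store_hit) while any straggling acks drain in MM_W.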

  // Transitions from SM
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }
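
  // An upgrading sharer already holds the data, so the verify variants
  // (v_writeDataToCacheVerify / vt_writeDataToTBEVerify) check the
  // incoming copy against the cached one rather than blindly
  // overwriting it.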

  // Transitions from ISM
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
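
  // ISM holds the new data while the last invalidation acks arrive;
  // only then does the store retire and the directory get unblocked
  // (gm_sendUnblockM). The _F variant instead continues the flush by
  // issuing a PUTF.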

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
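
  // When an upgrading owner loses the race to a foreign GETX, it must
  // surrender the dirty data and drop to IM; pp_incrementNumberOfMessagesByOne
  // seemingly restores the data response to the expected count, since
  // the block now has to be re-fetched.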

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }
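
  // Exclusive_Data on a read means no other cache held the block, so
  // the loader can take it in M and later satisfy a store locally
  // (M_W, Store below) without going back to the network.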

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: even with no sharers, the directory may still be the
    // owner, which is why we land in S rather than an exclusive state.
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
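
  // The _W "wait" states already own the data, so loads and stores hit
  // locally while the final acks trickle in; completion merely unblocks
  // the directory and frees the TBE.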

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }
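
  // During a writeback the cache block is gone, but the TBE still has
  // the data, so forwarded requests are answered from the TBE; sharing
  // the data while in MI demotes the writeback to an owned one (OI).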

  // Transitions from II
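  // In II the data has already been handed to another cache, so only
  // acks are sent; a Writeback_Nack cancels the PUT without any data
  // transfer.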
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

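  // Transitions from MM_F (flush in progress; data lives in the TBE)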
  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}