ruby: rename template_hack to template
gem5.git: src/mem/protocol/MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   int cache_response_latency = 10,
   int issue_latency = 2,
   int l2_cache_hit_latency = 10,
   bool no_mig_atomic = true,
   bool send_evictions
{

  // NETWORK BUFFERS
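  // Requests, forwards, responses, and unblocks each travel on their own
  // virtual network so that a stalled request can never block the responses
  // and unblocks needed to retire it (the standard protocol-deadlock-avoidance
  // use of virtual networks).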
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";


  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient States Related to Flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor; block was atomically accessed, so don't migrate it";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template="<L1Cache_TBE>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if (is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if (is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

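  // A block may live in at most one of the L1I, L1D, and L2 tags at any time;
  // the asserts below enforce that exclusion invariant on every state change.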
  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if (type == RubyRequestType:FLUSH) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

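  // A block that was just filled from the local L2 is profiled as an L2 hit
  // exactly once: the FromL2 flag set by nb_copyFromTBEToL1 is consumed here.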
  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **
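  // In_ports are polled in rank order, highest rank first: internal trigger
  // messages (rank 3) are handled before responses (rank 2), forwarded
  // requests (rank 1), and finally new processor requests from the mandatory
  // queue (rank 0), so in-flight transactions complete before new work starts.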

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if ((in_msg.Type == CoherenceRequestType:GETX) || (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
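  // New processor requests must respect the exclusive L1/L2 hierarchy: on an
  // L1 tag miss the block may first have to move from the other L1 or the L2,
  // and a full L1 set or L2 set is drained via L1_to_L2 transfers and
  // L2_Replacement victims before the demand request itself can allocate.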
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := get_time();
      }
    }
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

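  // Ack accounting: a requestor waits for machineCount(L1Cache) messages in
  // total (one from each other cache, plus one for the memory response). On a
  // directed probe only the owner is forwarded the request, so its data reply
  // carries all of those acks; otherwise the owner's reply counts as two, its
  // own ack plus the memory response that is suppressed when an owner exists.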
  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, while remaining the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

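  // The directory can merge concurrent GETS requests for a line into a single
  // MERGED_GETS forwarded to the owner, which multicasts one DATA_SHARED
  // response to every cache listed in MergedRequestors.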
  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, GenericMachineType:L1Cache, tbe.DataBlk);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

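  // With a full-bit directory, caches known not to hold the block never send
  // explicit acks; the directory instead reports their count as SilentAcks on
  // the forwarded request, and the first response applies that count below
  // (guarded by tbe.AppliedSilentAcks so it is subtracted only once).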
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************
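
  // Two stall disciplines appear below: zz_stallAndWaitMandatoryQueue parks
  // the request in a wait buffer until a kd_/ka_ wake-up action releases it,
  // while z_stall leaves the message in place and lets the next in_port be
  // examined.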

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
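  // An L2-to-L1 move is two-phase: the Trigger_L2_to_L1* transition copies
  // the block through a TBE into the L1, stalls the demand request, and
  // schedules a TriggerType:L2_to_L1 message after l2_cache_hit_latency; the
  // Complete_L2_to_L1 transition then enters the locked *R state and wakes
  // the stalled request.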
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, IR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, SR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, OR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, MR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MMR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from Idle
  transition({I, IR}, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({I, IR}, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({I, IR}, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({I, IR}, Flush_line, IM_F) {
    it_allocateTBE;
    bf_issueGETF;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM, ISM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(SR, {Load, Ifetch}, S) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({S, SR}, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({S, SR}, Flush_line, SM_F) {
    i_allocateTBE;
    bf_issueGETF;
    uu_profileMiss;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(OR, {Load, Ifetch}, O) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({O, OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition({MM, M}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MMR, {Load, Ifetch}, MM) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

1605 transition(MM_F, Block_Ack, MI_F) {
1606 df_issuePUTF;
1607 l_popForwardQueue;
1608 kd_wakeUpDependents;
1609 }
1610
1611 transition(MM, L2_Replacement, MI) {
1612 i_allocateTBE;
1613 d_issuePUT;
1614 forward_eviction_to_cpu;
1615 rr_deallocateL2CacheBlock;
1616 ka_wakeUpAllDependents;
1617 }
1618
1619 transition(MM, {Other_GETX, Invalidate}, I) {
1620 c_sendExclusiveData;
1621 forward_eviction_to_cpu;
1622 l_popForwardQueue;
1623 }
1624
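  // Migratory-sharing optimization: an ordinary GETS from another core is
  // answered with exclusive data and the local copy is invalidated, on the
  // expectation that the reader will write the block next. DMA reads and
  // non-migratory requests (Other_GETS_No_Mig, presumably generated when
  // no_mig_atomic suppresses migration for atomically-accessed blocks)
  // receive shared data instead, and this node downgrades to Owned.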
  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MR, {Load, Ifetch}, M) {
    h_load_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM
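  // IM: a GETX is outstanding from Invalid. No valid copy is held yet, so
  // forwarded requests are simply acked while data and invalidation acks
  // are collected.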

  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
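  // SM: upgrading from Shared via GETX. An Other_GETX or Invalidate that
  // races ahead of our request forces the copy to be given up, and the miss
  // falls back to IM to wait for fresh data.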
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
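  // ISM: the data response has arrived but invalidation acks are still
  // outstanding; the final ack completes the store and unblocks into MM.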
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM
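  // OM: upgrading from Owned, so this node remains the owner and must keep
  // supplying data to forwarded requests while its acks drain.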

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS
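  // IS: a GETS is outstanding. Shared_Ack/Shared_Data responses record that
  // other sharers exist (r_setSharerBit), while Exclusive_Data installs the
  // block exclusively in M_W.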

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS
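  // SS: the GETS data has arrived and the load has completed; the remaining
  // acks are counted so the directory can be unblocked into Shared.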

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W
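  // MM_W (and M_W below): the block is already held exclusively while the
  // last acks drain, so stores hit locally during the wait; the final ack
  // sends the unblock and settles the stable state.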

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W
  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI
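  // OI/MI: the block was replaced and a PUT is outstanding. The cache block
  // itself is gone, so forwarded requests are served with data held in the
  // TBE until the directory's Writeback_Ack retires the entry.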

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
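  // II: the copy was invalidated while its writeback was still in flight;
  // forwards are acked, and the eventual Writeback_Ack or Writeback_Nack
  // merely cleans up the TBE.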
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

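  // MM_F: a flush (GETF) is in progress with the data held in the TBE.
  // Forwarded exclusive requests hand the line over and drop to IM_F, while
  // shared readers are served from the TBE and the state falls to OM_F.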
  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}