/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   Cycles cache_response_latency = 10,
   Cycles issue_latency = 2,
   Cycles l2_cache_hit_latency = 10,
   bool no_mig_atomic = true,
   bool send_evictions
{

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";

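  // Requests, forwards, responses, and unblocks each travel on their own
  // virtual network (2, 3, 4, and 5 respectively). Separating request-side
  // traffic from response-side traffic is the usual way to break
  // protocol-level deadlock cycles; the numbering itself is a convention
  // shared with the matching directory machine.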

  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient States Related to Flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor to an atomically accessed block that should not migrate";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";

    Cycles InitialRequestTime, default="Cycles(0)",
           desc="time the initial request was sent from the L1Cache";
    Cycles ForwardRequestTime, default="Cycles(0)",
           desc="time the dir forwarded the request";
    Cycles FirstResponseTime, default="Cycles(0)",
           desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);
  Cycles curCycle();

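  // Lookup order below mirrors the exclusion invariant asserted in
  // setState(): a block is present in at most one of the L2, L1D, and L1I,
  // so the first valid entry found is the only one.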
  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if (is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if (is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return cache_entry.DataBlk;
    }

    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    }

    error("Missing data block");
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

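  // The asserts below enforce pairwise exclusion: a tag may be present in
  // at most one of the three caches at any time.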
  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if (type == RubyRequestType:FLUSH) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

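  // FromL2 marks a block that was just promoted from the local L2; the
  // first access after the promotion is credited to the L2 (and the flag
  // cleared) so hit accounting reflects where the data actually came from.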
  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if ((in_msg.Type == CoherenceRequestType:GETX) || (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
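  // For each processor request the logic below cascades through, roughly:
  //   1. hit in the matching L1  -> service the request directly;
  //   2. hit in the *other* L1   -> move the block to the L2 (L1_to_L2),
  //      first evicting an L2 victim if its set is full;
  //   3. hit in the L2           -> schedule an L2-to-L1 transfer;
  //   4. miss everywhere         -> fetch into the L1, making room in the
  //      L1 (and transitively in the L2) as needed.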
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

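  // Each action carries a short quoted code ("a", "bo", "\e", ...); SLICC
  // uses these codes when it generates transition tables and HTML
  // documentation for the protocol.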
  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := curCycle();
      }
    }
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, remaining the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, GenericMachineType:L1Cache, tbe.DataBlk);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := curCycle();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule completion of an L2-to-L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

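  // Completion is detected here but processed through the trigger queue,
  // so "all acks received" is handled like any other incoming event. The
  // Sharers bit (set by r_setSharerBit) selects between ALL_ACKS and
  // ALL_ACKS_NO_SHARERS.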
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
    ++L1DcacheMemory.demand_misses;
  }

  action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
    ++L1DcacheMemory.demand_hits;
  }

  action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
    ++L1IcacheMemory.demand_misses;
  }

  action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
    ++L1IcacheMemory.demand_hits;
  }

  action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
    ++L2cacheMemory.demand_misses;
  }

  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits") {
    ++L2cacheMemory.demand_hits;
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

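  // Two stall styles appear below: zz_stallAndWaitMandatoryQueue parks the
  // request until a wakeUp*Dependents action re-enables it, while z_stall
  // leaves the message in place so the next in_port is checked.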
  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }

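  // L2-to-L1 transfers are modeled in two steps: the block moves at once
  // (staged through a TBE) while the requesting instruction stalls; after
  // l2_cache_hit_latency the trigger fires Complete_L2_to_L1, moving the
  // block from the transient T state to the matching "locked" R state,
  // where the woken mandatory request is finally serviced.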
1325 transition(I, Trigger_L2_to_L1D, IT) {
1326 i_allocateTBE;
1327 rr_deallocateL2CacheBlock;
1328 ii_allocateL1DCacheBlock;
1329 nb_copyFromTBEToL1; // Not really needed for state I
1330 s_deallocateTBE;
1331 zz_stallAndWaitMandatoryQueue;
1332 ll_L2toL1Transfer;
1333 }
1334
1335 transition(S, Trigger_L2_to_L1D, ST) {
1336 i_allocateTBE;
1337 rr_deallocateL2CacheBlock;
1338 ii_allocateL1DCacheBlock;
1339 nb_copyFromTBEToL1;
1340 s_deallocateTBE;
1341 zz_stallAndWaitMandatoryQueue;
1342 ll_L2toL1Transfer;
1343 }
1344
1345 transition(O, Trigger_L2_to_L1D, OT) {
1346 i_allocateTBE;
1347 rr_deallocateL2CacheBlock;
1348 ii_allocateL1DCacheBlock;
1349 nb_copyFromTBEToL1;
1350 s_deallocateTBE;
1351 zz_stallAndWaitMandatoryQueue;
1352 ll_L2toL1Transfer;
1353 }
1354
1355 transition(M, Trigger_L2_to_L1D, MT) {
1356 i_allocateTBE;
1357 rr_deallocateL2CacheBlock;
1358 ii_allocateL1DCacheBlock;
1359 nb_copyFromTBEToL1;
1360 s_deallocateTBE;
1361 zz_stallAndWaitMandatoryQueue;
1362 ll_L2toL1Transfer;
1363 }
1364
1365 transition(MM, Trigger_L2_to_L1D, MMT) {
1366 i_allocateTBE;
1367 rr_deallocateL2CacheBlock;
1368 ii_allocateL1DCacheBlock;
1369 nb_copyFromTBEToL1;
1370 s_deallocateTBE;
1371 zz_stallAndWaitMandatoryQueue;
1372 ll_L2toL1Transfer;
1373 }
1374
1375 transition(I, Trigger_L2_to_L1I, IT) {
1376 i_allocateTBE;
1377 rr_deallocateL2CacheBlock;
1378 jj_allocateL1ICacheBlock;
1379 nb_copyFromTBEToL1;
1380 s_deallocateTBE;
1381 zz_stallAndWaitMandatoryQueue;
1382 ll_L2toL1Transfer;
1383 }
1384
1385 transition(S, Trigger_L2_to_L1I, ST) {
1386 i_allocateTBE;
1387 rr_deallocateL2CacheBlock;
1388 jj_allocateL1ICacheBlock;
1389 nb_copyFromTBEToL1;
1390 s_deallocateTBE;
1391 zz_stallAndWaitMandatoryQueue;
1392 ll_L2toL1Transfer;
1393 }
1394
1395 transition(O, Trigger_L2_to_L1I, OT) {
1396 i_allocateTBE;
1397 rr_deallocateL2CacheBlock;
1398 jj_allocateL1ICacheBlock;
1399 nb_copyFromTBEToL1;
1400 s_deallocateTBE;
1401 zz_stallAndWaitMandatoryQueue;
1402 ll_L2toL1Transfer;
1403 }
1404
1405 transition(M, Trigger_L2_to_L1I, MT) {
1406 i_allocateTBE;
1407 rr_deallocateL2CacheBlock;
1408 jj_allocateL1ICacheBlock;
1409 nb_copyFromTBEToL1;
1410 s_deallocateTBE;
1411 zz_stallAndWaitMandatoryQueue;
1412 ll_L2toL1Transfer;
1413 }
1414
1415 transition(MM, Trigger_L2_to_L1I, MMT) {
1416 i_allocateTBE;
1417 rr_deallocateL2CacheBlock;
1418 jj_allocateL1ICacheBlock;
1419 nb_copyFromTBEToL1;
1420 s_deallocateTBE;
1421 zz_stallAndWaitMandatoryQueue;
1422 ll_L2toL1Transfer;
1423 }
1424
1425 transition(IT, Complete_L2_to_L1, IR) {
1426 j_popTriggerQueue;
1427 kd_wakeUpDependents;
1428 }
1429
1430 transition(ST, Complete_L2_to_L1, SR) {
1431 j_popTriggerQueue;
1432 kd_wakeUpDependents;
1433 }
1434
1435 transition(OT, Complete_L2_to_L1, OR) {
1436 j_popTriggerQueue;
1437 kd_wakeUpDependents;
1438 }
1439
1440 transition(MT, Complete_L2_to_L1, MR) {
1441 j_popTriggerQueue;
1442 kd_wakeUpDependents;
1443 }
1444
1445 transition(MMT, Complete_L2_to_L1, MMR) {
1446 j_popTriggerQueue;
1447 kd_wakeUpDependents;
1448 }
1449
1450 // Transitions from Idle
1451 transition({I,IR}, Load, IS) {
1452 ii_allocateL1DCacheBlock;
1453 i_allocateTBE;
1454 a_issueGETS;
1455 uu_profileL1DataMiss;
1456 uu_profileL2Miss;
1457 k_popMandatoryQueue;
1458 }
1459
1460 transition({I,IR}, Ifetch, IS) {
1461 jj_allocateL1ICacheBlock;
1462 i_allocateTBE;
1463 a_issueGETS;
1464 uu_profileL1InstMiss;
1465 uu_profileL2Miss;
1466 k_popMandatoryQueue;
1467 }
1468
1469 transition({I,IR}, Store, IM) {
1470 ii_allocateL1DCacheBlock;
1471 i_allocateTBE;
1472 b_issueGETX;
1473 uu_profileL1DataMiss;
1474 uu_profileL2Miss;
1475 k_popMandatoryQueue;
1476 }
1477
1478 transition({I, IR}, Flush_line, IM_F) {
1479 it_allocateTBE;
1480 bf_issueGETF;
1481 k_popMandatoryQueue;
1482 }
1483
1484 transition(I, L2_Replacement) {
1485 rr_deallocateL2CacheBlock;
1486 ka_wakeUpAllDependents;
1487 }
1488
1489 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1490 f_sendAck;
1491 l_popForwardQueue;
1492 }
1493
1494 // Transitions from Shared
1495 transition({S, SM, ISM}, Load) {
1496 h_load_hit;
1497 uu_profileL1DataHit;
1498 k_popMandatoryQueue;
1499 }
1500
1501 transition({S, SM, ISM}, Ifetch) {
1502 h_load_hit;
1503 uu_profileL1InstHit;
1504 k_popMandatoryQueue;
1505 }
1506
  transition(SR, Load, S) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(SR, Ifetch, S) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({S,SR}, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({S, SR}, Flush_line, SM_F) {
    i_allocateTBE;
    bf_issueGETF;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
    h_load_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition(OR, Load, O) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(OR, Ifetch, O) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

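  // The owner already holds valid data, so its GETX needs one fewer
  // response; p_decrementNumberOfMessagesByOne accounts for that up front.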
  transition({O,OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

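  // As owner, O supplies the data for every external request: a GETX takes
  // the block away entirely, while GETS requests leave this cache in O.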
  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition({MM, M}, {Ifetch}) {
    h_load_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition({MM, M}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MMR, Load, MM) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Ifetch, MM) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

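  // Migratory sharing: a plain GETS from another cache takes the block
  // exclusively, so a reader about to write avoids a second request;
  // non-migratory and DMA reads instead downgrade this cache to O.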
  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MR, Load, M) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Ifetch, M) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

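  // Ack counting: every response decrements the outstanding-message count,
  // and o_checkForCompletion raises an All_acks* trigger when it hits zero.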
  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

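  // Data (sharers may exist) parks the store in ISM until all acks arrive,
  // while Exclusive_Data lets the store complete right away in MM_W.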
  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

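  // The upgrading cache already holds a valid S copy, so the verify write
  // actions can check the incoming data against the cached block (the two
  // should be identical).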
  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM

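  // If the block is stolen while the store is pending, OM falls back to IM;
  // pp_incrementNumberOfMessagesByOne undoes the earlier owner discount,
  // since the store must now wait for a data response again.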
  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS

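  // Shared_Ack and Shared_Data set the sharer bit, which lets
  // o_checkForCompletion raise All_acks instead of All_acks_no_sharers.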
  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: even with no sharer responses, the directory may still be the
    // owner, so we conservatively end in S.
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

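  // The _W states have already performed the access; they only linger to
  // collect the remaining acks before settling into MM or M.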
  transition(MM_W, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI

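  // The cache block itself is gone, but the TBE still holds the data, so
  // requests that race with the writeback are answered out of the TBE.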
  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
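  // In II the data has already been handed to another requester, so probes
  // get bare acks and the pending PUT ends with either an Ack or a Nack.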
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

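  // Flush in progress: these mirror the MM transitions above, but the data
  // now comes from the TBE because the cache block was deallocated when the
  // flush was issued.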
  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}