ruby: restrict Address to being a type and not a variable name
src/mem/protocol/MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1Icache,
   CacheMemory * L1Dcache,
   CacheMemory * L2cache,
   Cycles cache_response_latency = 10,
   Cycles issue_latency = 2,
   Cycles l2_cache_hit_latency = 10,
   bool no_mig_atomic = true,
   bool send_evictions
{

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";


  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient states related to flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor for a block we accessed atomically (suppresses migratory handoff)";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed atomically";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";

    Cycles InitialRequestTime, default="Cycles(0)",
           desc="time the initial request was sent from the L1Cache";
    Cycles ForwardRequestTime, default="Cycles(0)",
           desc="time the dir forwarded the request";
    Cycles FirstResponseTime, default="Cycles(0)",
           desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);
  Cycles curCycle();

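  // Lookup order relies on exclusion between the caches: a block resides in
  // at most one of the L2, L1D, or L1I at a time (enforced by the asserts in
  // setState), so the first valid entry found is the only one.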
  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return cache_entry.DataBlk;
    }

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    }

    error("Missing data block");
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
    assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
    assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if ((type == RubyRequestType:FLUSH)) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

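  // Attribute a hit to the L2 if the block was just moved up from the L2,
  // then clear the flag so subsequent hits count as L1 hits.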
  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

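  // In_ports are serviced in rank order, highest first, so internal triggers
  // and in-flight network traffic are handled before new requests from the
  // mandatory queue.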
  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Addr") {

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Addr") {

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if ((in_msg.Type == CoherenceRequestType:GETX) ||
            (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Addr, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Addr, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Addr, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

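  // If this is the only L1 in the system there is no other cache to
  // invalidate, so the GETX itself is skipped; the pending-message count is
  // still initialized so the completion check behaves uniformly.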
  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := curCycle();
      }
    }

    // One from each other cache (n-1) plus the memory (+1)
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
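        // A directed probe was forwarded to this cache alone, so the single
        // response must carry acks on behalf of every cache; otherwise each
        // response carries two acks (interpretation: the data provider
        // answers for itself and for the memory, which does not respond).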
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, remaining the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Addr, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, GenericMachineType:L1Cache, tbe.DataBlk);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

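  // With a full-bit directory, a response's SilentAcks stand in for caches
  // that the directory never probed; they are folded into the pending count
  // only once per request (tracked by tbe.AppliedSilentAcks).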
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := curCycle();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Queue an L2-to-L1 transfer trigger") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Addr := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Addr := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;
  }

  action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
    ++L1Dcache.demand_hits;
  }

  action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;
  }

  action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
    ++L1Icache.demand_hits;
  }

  action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits") {
    ++L2cache.demand_hits;
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the head of the mandatory queue and wait until the address is woken up.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
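  // A move between levels passes through a transient T state (IT/ST/OT/MT/
  // MMT) while the trigger is in flight, then parks in the matching R state
  // (IR/SR/OR/MR/MMR) until the stalled mandatory request is replayed.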
1328 transition({I, S, O, M, MM}, L1_to_L2) {
1329 i_allocateTBE;
1330 gg_deallocateL1CacheBlock;
1331 vv_allocateL2CacheBlock;
1332 hp_copyFromTBEToL2;
1333 s_deallocateTBE;
1334 }
1335
1336 transition(I, Trigger_L2_to_L1D, IT) {
1337 i_allocateTBE;
1338 rr_deallocateL2CacheBlock;
1339 ii_allocateL1DCacheBlock;
1340 nb_copyFromTBEToL1; // Not really needed for state I
1341 s_deallocateTBE;
1342 zz_stallAndWaitMandatoryQueue;
1343 ll_L2toL1Transfer;
1344 }
1345
1346 transition(S, Trigger_L2_to_L1D, ST) {
1347 i_allocateTBE;
1348 rr_deallocateL2CacheBlock;
1349 ii_allocateL1DCacheBlock;
1350 nb_copyFromTBEToL1;
1351 s_deallocateTBE;
1352 zz_stallAndWaitMandatoryQueue;
1353 ll_L2toL1Transfer;
1354 }
1355
1356 transition(O, Trigger_L2_to_L1D, OT) {
1357 i_allocateTBE;
1358 rr_deallocateL2CacheBlock;
1359 ii_allocateL1DCacheBlock;
1360 nb_copyFromTBEToL1;
1361 s_deallocateTBE;
1362 zz_stallAndWaitMandatoryQueue;
1363 ll_L2toL1Transfer;
1364 }
1365
1366 transition(M, Trigger_L2_to_L1D, MT) {
1367 i_allocateTBE;
1368 rr_deallocateL2CacheBlock;
1369 ii_allocateL1DCacheBlock;
1370 nb_copyFromTBEToL1;
1371 s_deallocateTBE;
1372 zz_stallAndWaitMandatoryQueue;
1373 ll_L2toL1Transfer;
1374 }
1375
1376 transition(MM, Trigger_L2_to_L1D, MMT) {
1377 i_allocateTBE;
1378 rr_deallocateL2CacheBlock;
1379 ii_allocateL1DCacheBlock;
1380 nb_copyFromTBEToL1;
1381 s_deallocateTBE;
1382 zz_stallAndWaitMandatoryQueue;
1383 ll_L2toL1Transfer;
1384 }
1385
1386 transition(I, Trigger_L2_to_L1I, IT) {
1387 i_allocateTBE;
1388 rr_deallocateL2CacheBlock;
1389 jj_allocateL1ICacheBlock;
1390 nb_copyFromTBEToL1;
1391 s_deallocateTBE;
1392 zz_stallAndWaitMandatoryQueue;
1393 ll_L2toL1Transfer;
1394 }
1395
1396 transition(S, Trigger_L2_to_L1I, ST) {
1397 i_allocateTBE;
1398 rr_deallocateL2CacheBlock;
1399 jj_allocateL1ICacheBlock;
1400 nb_copyFromTBEToL1;
1401 s_deallocateTBE;
1402 zz_stallAndWaitMandatoryQueue;
1403 ll_L2toL1Transfer;
1404 }
1405
1406 transition(O, Trigger_L2_to_L1I, OT) {
1407 i_allocateTBE;
1408 rr_deallocateL2CacheBlock;
1409 jj_allocateL1ICacheBlock;
1410 nb_copyFromTBEToL1;
1411 s_deallocateTBE;
1412 zz_stallAndWaitMandatoryQueue;
1413 ll_L2toL1Transfer;
1414 }
1415
1416 transition(M, Trigger_L2_to_L1I, MT) {
1417 i_allocateTBE;
1418 rr_deallocateL2CacheBlock;
1419 jj_allocateL1ICacheBlock;
1420 nb_copyFromTBEToL1;
1421 s_deallocateTBE;
1422 zz_stallAndWaitMandatoryQueue;
1423 ll_L2toL1Transfer;
1424 }
1425
1426 transition(MM, Trigger_L2_to_L1I, MMT) {
1427 i_allocateTBE;
1428 rr_deallocateL2CacheBlock;
1429 jj_allocateL1ICacheBlock;
1430 nb_copyFromTBEToL1;
1431 s_deallocateTBE;
1432 zz_stallAndWaitMandatoryQueue;
1433 ll_L2toL1Transfer;
1434 }
1435
1436 transition(IT, Complete_L2_to_L1, IR) {
1437 j_popTriggerQueue;
1438 kd_wakeUpDependents;
1439 }
1440
1441 transition(ST, Complete_L2_to_L1, SR) {
1442 j_popTriggerQueue;
1443 kd_wakeUpDependents;
1444 }
1445
1446 transition(OT, Complete_L2_to_L1, OR) {
1447 j_popTriggerQueue;
1448 kd_wakeUpDependents;
1449 }
1450
1451 transition(MT, Complete_L2_to_L1, MR) {
1452 j_popTriggerQueue;
1453 kd_wakeUpDependents;
1454 }
1455
1456 transition(MMT, Complete_L2_to_L1, MMR) {
1457 j_popTriggerQueue;
1458 kd_wakeUpDependents;
1459 }
1460
1461 // Transitions from Idle
1462 transition({I,IR}, Load, IS) {
1463 ii_allocateL1DCacheBlock;
1464 i_allocateTBE;
1465 a_issueGETS;
1466 uu_profileL1DataMiss;
1467 uu_profileL2Miss;
1468 k_popMandatoryQueue;
1469 }
1470
1471 transition({I,IR}, Ifetch, IS) {
1472 jj_allocateL1ICacheBlock;
1473 i_allocateTBE;
1474 a_issueGETS;
1475 uu_profileL1InstMiss;
1476 uu_profileL2Miss;
1477 k_popMandatoryQueue;
1478 }
1479
1480 transition({I,IR}, Store, IM) {
1481 ii_allocateL1DCacheBlock;
1482 i_allocateTBE;
1483 b_issueGETX;
1484 uu_profileL1DataMiss;
1485 uu_profileL2Miss;
1486 k_popMandatoryQueue;
1487 }
1488
1489 transition({I, IR}, Flush_line, IM_F) {
1490 it_allocateTBE;
1491 bf_issueGETF;
1492 k_popMandatoryQueue;
1493 }
1494
1495 transition(I, L2_Replacement) {
1496 rr_deallocateL2CacheBlock;
1497 ka_wakeUpAllDependents;
1498 }
1499
1500 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1501 f_sendAck;
1502 l_popForwardQueue;
1503 }
1504
1505 // Transitions from Shared
1506 transition({S, SM, ISM}, Load) {
1507 h_load_hit;
1508 uu_profileL1DataHit;
1509 k_popMandatoryQueue;
1510 }
1511
1512 transition({S, SM, ISM}, Ifetch) {
1513 h_load_hit;
1514 uu_profileL1InstHit;
1515 k_popMandatoryQueue;
1516 }
1517
1518 transition(SR, Load, S) {
1519 h_load_hit;
1520 uu_profileL1DataMiss;
1521 uu_profileL2Hit;
1522 k_popMandatoryQueue;
1523 ka_wakeUpAllDependents;
1524 }
1525
1526 transition(SR, Ifetch, S) {
1527 h_load_hit;
1528 uu_profileL1InstMiss;
1529 uu_profileL2Hit;
1530 k_popMandatoryQueue;
1531 ka_wakeUpAllDependents;
1532 }
1533
1534 transition({S,SR}, Store, SM) {
1535 i_allocateTBE;
1536 b_issueGETX;
1537 uu_profileL1DataMiss;
1538 uu_profileL2Miss;
1539 k_popMandatoryQueue;
1540 }
1541
1542 transition({S, SR}, Flush_line, SM_F) {
1543 i_allocateTBE;
1544 bf_issueGETF;
1545 forward_eviction_to_cpu;
1546 gg_deallocateL1CacheBlock;
1547 k_popMandatoryQueue;
1548 }
1549
1550 transition(S, L2_Replacement, I) {
1551 forward_eviction_to_cpu;
1552 rr_deallocateL2CacheBlock;
1553 ka_wakeUpAllDependents;
1554 }
1555
1556 transition(S, {Other_GETX, Invalidate}, I) {
1557 f_sendAck;
1558 forward_eviction_to_cpu;
1559 l_popForwardQueue;
1560 }
1561
1562 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1563 ff_sendAckShared;
1564 l_popForwardQueue;
1565 }
1566
1567 // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
    h_load_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition(OR, Load, O) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(OR, Ifetch, O) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({O, OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
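  // A plain Other_GETS hands the block over exclusively (the migratory
  // sharing optimization); Other_GETS_No_Mig and DMA reads instead demote
  // this copy to O so the line stays shared.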
  transition({MM, M}, {Ifetch}) {
    h_load_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition({MM, M}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MMR, Load, MM) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Ifetch, MM) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
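  // A store in M promotes the block to MM purely locally; the line is
  // already held dirty and exclusive, so no messages are needed.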
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MR, Load, M) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Ifetch, M) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM
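  // While waiting in IM the requester counts responses: each Ack and the
  // data message decrement the expected-message count, and
  // o_checkForCompletion fires an All_acks* trigger once it reaches zero.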

  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
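  // A competing GETX can still win while the upgrade is outstanding; if it
  // does, the local copy is invalidated and the request falls back to a
  // full miss in IM.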
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
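  // ISM already holds the data and is only draining acks;
  // All_acks_no_sharers completes the store and unblocks the directory.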
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM
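  // An upgrading owner keeps servicing snoops with data while its
  // invalidation acks drain. Losing the block to a competing GETX drops the
  // request back to IM, with pp_incrementNumberOfMessagesByOne adjusting
  // the count of responses still expected.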

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS
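  // Shared_Ack/Shared_Data responses set the sharer bit so the requester
  // knows other copies exist; the load completes as soon as data arrives,
  // and any remaining acks are absorbed in SS.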

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS
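  // SS has already serviced the load; it lingers only to collect the
  // remaining acks before unblocking the directory.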

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: the directory might still be the owner, which is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W
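  // The *_W window states hold a freshly filled exclusive block while stray
  // acks drain; stores can hit locally in the meantime.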

  transition(MM_W, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI
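  // In OI/MI the block has left the cache, but its data lives in the TBE
  // until the directory acknowledges the writeback, so forwarded requests
  // are serviced from the TBE.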

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
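  // II holds no data (it was already supplied from the TBE); a
  // Writeback_Nack means the directory saw another request first, so the
  // TBE is simply retired. The trailing MM_F transitions handle snoops that
  // race with an in-flight flush.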
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}