ruby: message buffers: significant changes
[gem5.git] / src / mem / protocol / MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   CacheMemory * L2cache;
   Cycles cache_response_latency := 10;
   Cycles issue_latency := 2;
   Cycles l2_cache_hit_latency := 10;
   bool no_mig_atomic := "True";
   bool send_evictions;

   // NETWORK BUFFERS
   MessageBuffer * requestFromCache, network="To", virtual_network="2",
        ordered="false", vnet_type="request";
   MessageBuffer * responseFromCache, network="To", virtual_network="4",
        ordered="false", vnet_type="response";
   MessageBuffer * unblockFromCache, network="To", virtual_network="5",
        ordered="false", vnet_type="unblock";

   MessageBuffer * forwardToCache, network="From", virtual_network="3",
        ordered="false", vnet_type="forward";
   MessageBuffer * responseToCache, network="From", virtual_network="4",
        ordered="false", vnet_type="response";
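   // Note on the virtual-network split above (the rationale is not stated in
   // the original comments): requests (vnet 2), forwarded probes (vnet 3),
   // responses (vnet 4), and unblocks (vnet 5) ride separate virtual networks
   // so that a backed-up request stream can never block the responses needed
   // to retire in-flight transactions, which is how Ruby protocols typically
   // avoid network-level protocol deadlock.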
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient states related to flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }
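  // Naming convention (inferred from the desc strings above): plain MOESI
  // letters are stable states; an R suffix (IR, SR, ...) marks a base state
  // that has just completed an L2-to-L1 transfer and is "locked" until the
  // stalled mandatory-queue request is replayed; a T suffix (IT, ST, ...)
  // marks a block in flight between the L2 and an L1; an _F suffix marks the
  // flush variants of the transient states.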

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor; the block must not migrate";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic request";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";

    Cycles InitialRequestTime, default="Cycles(0)",
           desc="time the initial request was sent from the L1Cache";
    Cycles ForwardRequestTime, default="Cycles(0)",
           desc="time the dir forwarded the request";
    Cycles FirstResponseTime, default="Cycles(0)",
           desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);
  Cycles curCycle();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }
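  // The lookup order above is arbitrary in effect: the hierarchy is
  // exclusive, so a block is resident in at most one of the L2, L1D, and L1I
  // at any time (the asserts in setState below enforce exactly this pairwise
  // exclusion), and therefore at most one of the three lookups can return a
  // valid entry.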

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return cache_entry.DataBlk;
    }

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    }

    error("Missing data block");
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
    assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
    assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if ((type == RubyRequestType:FLUSH)) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  MachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return MachineType:L2Cache;
    }
    return MachineType:L1Cache;
  }
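  // testAndClearLocalHit lets the sequencer attribute a hit correctly: when a
  // block was just pulled up from the L2 (FromL2 is set by nb_copyFromTBEToL1
  // below), the first access is reported as an L2 hit rather than an L1 hit,
  // and the flag is cleared so later accesses count as ordinary L1 hits.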

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **
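  // The rank annotations below appear to order port polling from highest to
  // lowest: the local trigger queue (rank 3) and network responses (rank 2)
  // are serviced before forwarded requests (rank 1), and new processor
  // requests on the mandatory queue (rank 0) are considered last, so that
  // in-flight transactions can drain ahead of new work.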
  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Addr") {

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Addr") {

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if ((in_msg.Type == CoherenceRequestType:GETX) ||
            (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Addr, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Addr, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Addr, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
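  // The no_mig_atomic special case above is a locality optimization: a GetS
  // that hits a block this core has touched with an atomic operation raises
  // Other_GETS_No_Mig, and the matching transitions answer with shared data
  // while keeping ownership here, on the assumption that a block used for
  // atomics (e.g. a lock word) will soon be written again by this core.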

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to
        // blocks in the D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }
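  // In summary, both halves above walk the same decision tree: (1) hit in the
  // correct L1 -> service the request; (2) block resident in the other L1 ->
  // demote it to the L2, first making room in the L2 if necessary; (3) miss
  // with a free L1 frame -> pull the block up from the L2 if present there,
  // otherwise issue the external fetch; (4) miss with no free L1 frame ->
  // demote an L1 victim to the L2, which may in turn force an L2 replacement.
  // The intermediate steps stall the original request, which is replayed from
  // the mandatory queue once the block movements complete.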

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }
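  // Worked example of the pending-message arithmetic (not in the original
  // comments): with n = 4 caches, machineCount(MachineType:L1Cache) = 4, and
  // the requestor waits for 3 acks from its peers plus 1 response from
  // memory, i.e. (n - 1) + 1 = n = 4 messages before the transaction can
  // complete.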

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, issue_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := curCycle();
      }
    }

    // One from each other cache (n-1) plus the memory (+1)
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
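  // Why Acks is 2 versus machineCount (inferred from the pending-message
  // arithmetic in a_issueGETS): on a broadcast probe, this cache's data
  // response stands in for two expected messages, its own ack and the memory
  // response it supersedes. When the directory could direct the probe at this
  // single owner, no other cache ever sees the request, so the one response
  // must cover the requestor's entire expected count. For example, with n = 4:
  //   broadcast: owner data (Acks 2) + 2 peer acks (1 each) = 4
  //   directed probe: owner data alone (Acks 4)             = 4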

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, we remain the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      sequencer.readCallback(address, cache_entry.DataBlk, true,
          machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
          tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      sequencer.writeCallback(address, cache_entry.DataBlk, false,
                              testAndClearLocalHit(cache_entry));

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
          machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
          tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address, cache_entry.DataBlk, true,
        machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
        tbe.ForwardRequestTime, tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := curCycle();
      }
    }
  }
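  // Silent-ack accounting (inferred from the AppliedSilentAcks guard above):
  // with a full-bit directory, caches known not to hold the block are never
  // probed and send no acks. Every response instead carries the same
  // SilentAcks count copied from the forwarded request, so the requestor
  // subtracts it from NumPendingMsgs exactly once; the AppliedSilentAcks
  // flag prevents double-counting when multiple responses arrive.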

  action(uo_updateCurrentOwner, "uo", desc="When moving to the SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule the L2-to-L1 transfer completion trigger") {
    enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
      out_msg.Addr := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Addr := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }
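  // Completion is deliberately indirect: rather than finishing the
  // transaction inside the response handler, o_checkForCompletion converts
  // "no messages left" into a local ALL_ACKS / ALL_ACKS_NO_SHARERS trigger,
  // and the corresponding All_acks* transitions perform the actual state
  // change. The sharers distinction tells the requestor whether any other
  // cache kept a shared copy, which those transitions use when choosing the
  // final state.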

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;
  }

  action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
    ++L1Dcache.demand_hits;
  }

  action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;
  }

  action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
    ++L1Icache.demand_hits;
  }

  action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits") {
    ++L2cache.demand_hits;
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the head of the mandatory queue until the address is woken up.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }
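  // Two different stall mechanisms are used above:
  // zz_stallAndWaitMandatoryQueue parks the offending request in a
  // per-address wait buffer and relies on a later kd_wakeUpDependents /
  // ka_wakeUpAllDependents to replay it, whereas z_stall simply leaves the
  // message where it is and returns a protocol stall so the next in_port is
  // checked; the T and R states use z_stall on forwarded requests, leaving
  // them at the head of the forward queue to be retried once the L2-to-L1
  // transfer completes.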

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }
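  // Each L2-to-L1 move follows the recipe of the transition above: the data
  // is parked in a TBE, the L2 frame is freed, an L1 frame is allocated and
  // filled (with FromL2 set so the eventual hit is billed to the L2), the
  // requesting instruction is stalled, and ll_L2toL1Transfer schedules a
  // trigger after l2_cache_hit_latency cycles. Complete_L2_to_L1 then lands
  // the block in the matching R ("locked") state, which wakes and replays the
  // stalled mandatory-queue request.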
1327
1328 transition(S, Trigger_L2_to_L1D, ST) {
1329 i_allocateTBE;
1330 rr_deallocateL2CacheBlock;
1331 ii_allocateL1DCacheBlock;
1332 nb_copyFromTBEToL1;
1333 s_deallocateTBE;
1334 zz_stallAndWaitMandatoryQueue;
1335 ll_L2toL1Transfer;
1336 }
1337
1338 transition(O, Trigger_L2_to_L1D, OT) {
1339 i_allocateTBE;
1340 rr_deallocateL2CacheBlock;
1341 ii_allocateL1DCacheBlock;
1342 nb_copyFromTBEToL1;
1343 s_deallocateTBE;
1344 zz_stallAndWaitMandatoryQueue;
1345 ll_L2toL1Transfer;
1346 }
1347
1348 transition(M, Trigger_L2_to_L1D, MT) {
1349 i_allocateTBE;
1350 rr_deallocateL2CacheBlock;
1351 ii_allocateL1DCacheBlock;
1352 nb_copyFromTBEToL1;
1353 s_deallocateTBE;
1354 zz_stallAndWaitMandatoryQueue;
1355 ll_L2toL1Transfer;
1356 }
1357
1358 transition(MM, Trigger_L2_to_L1D, MMT) {
1359 i_allocateTBE;
1360 rr_deallocateL2CacheBlock;
1361 ii_allocateL1DCacheBlock;
1362 nb_copyFromTBEToL1;
1363 s_deallocateTBE;
1364 zz_stallAndWaitMandatoryQueue;
1365 ll_L2toL1Transfer;
1366 }
1367
1368 transition(I, Trigger_L2_to_L1I, IT) {
1369 i_allocateTBE;
1370 rr_deallocateL2CacheBlock;
1371 jj_allocateL1ICacheBlock;
1372 nb_copyFromTBEToL1;
1373 s_deallocateTBE;
1374 zz_stallAndWaitMandatoryQueue;
1375 ll_L2toL1Transfer;
1376 }
1377
1378 transition(S, Trigger_L2_to_L1I, ST) {
1379 i_allocateTBE;
1380 rr_deallocateL2CacheBlock;
1381 jj_allocateL1ICacheBlock;
1382 nb_copyFromTBEToL1;
1383 s_deallocateTBE;
1384 zz_stallAndWaitMandatoryQueue;
1385 ll_L2toL1Transfer;
1386 }
1387
1388 transition(O, Trigger_L2_to_L1I, OT) {
1389 i_allocateTBE;
1390 rr_deallocateL2CacheBlock;
1391 jj_allocateL1ICacheBlock;
1392 nb_copyFromTBEToL1;
1393 s_deallocateTBE;
1394 zz_stallAndWaitMandatoryQueue;
1395 ll_L2toL1Transfer;
1396 }
1397
1398 transition(M, Trigger_L2_to_L1I, MT) {
1399 i_allocateTBE;
1400 rr_deallocateL2CacheBlock;
1401 jj_allocateL1ICacheBlock;
1402 nb_copyFromTBEToL1;
1403 s_deallocateTBE;
1404 zz_stallAndWaitMandatoryQueue;
1405 ll_L2toL1Transfer;
1406 }
1407
1408 transition(MM, Trigger_L2_to_L1I, MMT) {
1409 i_allocateTBE;
1410 rr_deallocateL2CacheBlock;
1411 jj_allocateL1ICacheBlock;
1412 nb_copyFromTBEToL1;
1413 s_deallocateTBE;
1414 zz_stallAndWaitMandatoryQueue;
1415 ll_L2toL1Transfer;
1416 }
1417
1418 transition(IT, Complete_L2_to_L1, IR) {
1419 j_popTriggerQueue;
1420 kd_wakeUpDependents;
1421 }
1422
1423 transition(ST, Complete_L2_to_L1, SR) {
1424 j_popTriggerQueue;
1425 kd_wakeUpDependents;
1426 }
1427
1428 transition(OT, Complete_L2_to_L1, OR) {
1429 j_popTriggerQueue;
1430 kd_wakeUpDependents;
1431 }
1432
1433 transition(MT, Complete_L2_to_L1, MR) {
1434 j_popTriggerQueue;
1435 kd_wakeUpDependents;
1436 }
1437
1438 transition(MMT, Complete_L2_to_L1, MMR) {
1439 j_popTriggerQueue;
1440 kd_wakeUpDependents;
1441 }
1442
1443 // Transitions from Idle
1444 transition({I,IR}, Load, IS) {
1445 ii_allocateL1DCacheBlock;
1446 i_allocateTBE;
1447 a_issueGETS;
1448 uu_profileL1DataMiss;
1449 uu_profileL2Miss;
1450 k_popMandatoryQueue;
1451 }
1452
1453 transition({I,IR}, Ifetch, IS) {
1454 jj_allocateL1ICacheBlock;
1455 i_allocateTBE;
1456 a_issueGETS;
1457 uu_profileL1InstMiss;
1458 uu_profileL2Miss;
1459 k_popMandatoryQueue;
1460 }
1461
1462 transition({I,IR}, Store, IM) {
1463 ii_allocateL1DCacheBlock;
1464 i_allocateTBE;
1465 b_issueGETX;
1466 uu_profileL1DataMiss;
1467 uu_profileL2Miss;
1468 k_popMandatoryQueue;
1469 }
1470
1471 transition({I, IR}, Flush_line, IM_F) {
1472 it_allocateTBE;
1473 bf_issueGETF;
1474 k_popMandatoryQueue;
1475 }
1476
1477 transition(I, L2_Replacement) {
1478 rr_deallocateL2CacheBlock;
1479 ka_wakeUpAllDependents;
1480 }
1481
1482 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1483 f_sendAck;
1484 l_popForwardQueue;
1485 }
1486
1487 // Transitions from Shared
1488 transition({S, SM, ISM}, Load) {
1489 h_load_hit;
1490 uu_profileL1DataHit;
1491 k_popMandatoryQueue;
1492 }
1493
1494 transition({S, SM, ISM}, Ifetch) {
1495 h_load_hit;
1496 uu_profileL1InstHit;
1497 k_popMandatoryQueue;
1498 }
1499
1500 transition(SR, Load, S) {
1501 h_load_hit;
1502 uu_profileL1DataMiss;
1503 uu_profileL2Hit;
1504 k_popMandatoryQueue;
1505 ka_wakeUpAllDependents;
1506 }
1507
1508 transition(SR, Ifetch, S) {
1509 h_load_hit;
1510 uu_profileL1InstMiss;
1511 uu_profileL2Hit;
1512 k_popMandatoryQueue;
1513 ka_wakeUpAllDependents;
1514 }
1515
1516 transition({S,SR}, Store, SM) {
1517 i_allocateTBE;
1518 b_issueGETX;
1519 uu_profileL1DataMiss;
1520 uu_profileL2Miss;
1521 k_popMandatoryQueue;
1522 }
1523
1524 transition({S, SR}, Flush_line, SM_F) {
1525 i_allocateTBE;
1526 bf_issueGETF;
1527 forward_eviction_to_cpu;
1528 gg_deallocateL1CacheBlock;
1529 k_popMandatoryQueue;
1530 }
1531
1532 transition(S, L2_Replacement, I) {
1533 forward_eviction_to_cpu;
1534 rr_deallocateL2CacheBlock;
1535 ka_wakeUpAllDependents;
1536 }
1537
1538 transition(S, {Other_GETX, Invalidate}, I) {
1539 f_sendAck;
1540 forward_eviction_to_cpu;
1541 l_popForwardQueue;
1542 }
1543
1544 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1545 ff_sendAckShared;
1546 l_popForwardQueue;
1547 }
1548
1549 // Transitions from Owned
1550 transition({O, OM, SS, MM_W, M_W}, {Load}) {
1551 h_load_hit;
1552 uu_profileL1DataHit;
1553 k_popMandatoryQueue;
1554 }
1555
1556 transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
1557 h_load_hit;
1558 uu_profileL1InstHit;
1559 k_popMandatoryQueue;
1560 }
1561
1562 transition(OR, Load, O) {
1563 h_load_hit;
1564 uu_profileL1DataMiss;
1565 uu_profileL2Hit;
1566 k_popMandatoryQueue;
1567 ka_wakeUpAllDependents;
1568 }
1569
1570 transition(OR, Ifetch, O) {
1571 h_load_hit;
1572 uu_profileL1InstMiss;
1573 uu_profileL2Hit;
1574 k_popMandatoryQueue;
1575 ka_wakeUpAllDependents;
1576 }
1577
1578 transition({O,OR}, Store, OM) {
1579 i_allocateTBE;
1580 b_issueGETX;
1581 p_decrementNumberOfMessagesByOne;
1582 uu_profileL1DataMiss;
1583 uu_profileL2Miss;
1584 k_popMandatoryQueue;
1585 }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
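
  // A Merged_GETS is a forward in which several outstanding GETS requests
  // have been coalesced; em_sendDataSharedMultiple answers the whole set of
  // merged requestors with shared data in one action.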

  // Transitions from Modified
  transition({MM, M}, {Ifetch}) {
    h_load_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition({MM, M}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MMR, Load, MM) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Ifetch, MM) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
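
  // Migratory-sharing optimization: from MM, a plain Other_GETS hands the
  // block over exclusively (c_sendExclusiveData) and drops to I, betting
  // that the reader will write next. Other_GETS_No_Mig and NC_DMA_GETS
  // suppress the migration, so we keep a copy and retire to O instead. The
  // event choice is assumed to be made in the forward-queue in_port along
  // these lines (sketch; the predicate name is an assumption):
  //
  //   if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
  //     trigger(Event:Other_GETS_No_Mig, ...);  // keep the line, go to O
  //   } else {
  //     trigger(Event:Other_GETS, ...);         // migrate, go to I
  //   }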

  // Transitions from Dirty Exclusive
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MR, Load, M) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Ifetch, M) {
    h_load_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }
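
  // M holds dirty exclusive data that has not yet been written locally; the
  // M->MM Store transition above is how the protocol tells a migratory
  // read-then-write pattern apart from plain read sharing. Note that unlike
  // MM, an Other_GETS from M keeps a shared copy and retires to O.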

  // Transitions from IM

  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }
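
  // In IM the store is ordered but not yet performed. Plain Data means the
  // old owner had sharers, so we continue to ISM and keep counting acks;
  // Exclusive_Data lets sx_external_store_hit perform the store at once and
  // drain the remaining acks in MM_W. Completion is assumed to pick the
  // trigger from the sharer bit kept in the TBE (see r_setSharerBit below),
  // roughly: tbe.Sharers ? All_acks : All_acks_no_sharers.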

  // Transitions from SM
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
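
  // ISM: data is in hand and the store has been performed; only straggler
  // acks remain. All_acks_no_sharers finishes the miss, unblocks the
  // directory via gm_sendUnblockM and frees the TBE, while the _F flush
  // variant moves straight on to the PUTF writeback.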

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
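
  // The _F states mirror the ordinary miss states but terminate a
  // Flush_line: once every ack has arrived, df_issuePUTF pushes the line
  // toward memory and MI_F waits for the Writeback_Ack handled further
  // below. Note also that losing ownership in OM/OM_F re-adds one expected
  // response (pp_incrementNumberOfMessagesByOne), since the new owner must
  // now supply the data.
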
  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }
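
  // uo_updateCurrentOwner appears to record which node supplied the data,
  // presumably so later forwards can be attributed correctly; Shared_Data
  // additionally sets the sharer bit, steering completion toward All_acks
  // rather than All_acks_no_sharers.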

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: even with no sharers, the directory might still be the owner,
    // which is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
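
  // SS is the read-miss completion state once data has arrived;
  // gs_sendUnblockS tells the directory the fill is done so it can stop
  // blocking the address.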

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
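
  // The _W states hold a block that is already usable locally while the
  // broadcast acks drain: MM_W services further Stores as plain hits, and a
  // Store in M_W upgrades the wait to MM_W without issuing a new request.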

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }
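
  // OI/MI keep servicing forwarded requests out of the TBE after the cache
  // block itself has been freed; Writeback_Ack then closes the handshake,
  // with MI/MI_F writing back exclusive data and OI the owned copy.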
2095
2096 // Transitions from II
2097 transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
2098 f_sendAck;
2099 l_popForwardQueue;
2100 }
2101
2102 transition(II, Writeback_Ack, I) {
2103 g_sendUnblock;
2104 s_deallocateTBE;
2105 l_popForwardQueue;
2106 kd_wakeUpDependents;
2107 }
2108
2109 transition(II, Writeback_Nack, I) {
2110 s_deallocateTBE;
2111 l_popForwardQueue;
2112 kd_wakeUpDependents;
2113 }
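
  // II covers the race where the line was snooped away mid-writeback: the
  // data already went to its new owner, so Writeback_Ack only needs an
  // unblock and Writeback_Nack needs nothing beyond dropping the TBE.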

  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}