/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine({L1Cache, L2Cache}, "AMD Hammer-like protocol")
    : Sequencer * sequencer;
      CacheMemory * L1Icache;
      CacheMemory * L1Dcache;
      CacheMemory * L2cache;
      Cycles cache_response_latency := 10;
      Cycles issue_latency := 2;
      Cycles l2_cache_hit_latency := 10;
      bool no_mig_atomic := "True";
      bool send_evictions;

      // NETWORK BUFFERS
      MessageBuffer * requestFromCache, network="To", virtual_network="2",
            vnet_type="request";
      MessageBuffer * responseFromCache, network="To", virtual_network="4",
            vnet_type="response";
      MessageBuffer * unblockFromCache, network="To", virtual_network="5",
            vnet_type="unblock";

      MessageBuffer * forwardToCache, network="From", virtual_network="3",
            vnet_type="forward";
      MessageBuffer * responseToCache, network="From", virtual_network="4",
            vnet_type="response";

      MessageBuffer * mandatoryQueue;

      MessageBuffer * triggerQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient states related to flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor to a block we accessed atomically (no migration)";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // STRUCTURE DEFINITIONS
  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";

    Cycles InitialRequestTime, default="Cycles(0)",
        desc="time the initial request was sent from the L1Cache";
    Cycles ForwardRequestTime, default="Cycles(0)",
        desc="time the dir forwarded the request";
    Cycles FirstResponseTime, default="Cycles(0)",
        desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  Tick clockEdge();
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();

  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }
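
  // Note: the L1s and the L2 are kept exclusive of one another (see the
  // asserts in setState below), so at most one of the three lookups above
  // can return a valid entry for a given address.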

  void functionalRead(Addr addr, Packet *pkt) {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      testAndRead(addr, cache_entry.DataBlk, pkt);
    } else {
      TBE tbe := TBEs[addr];
      if(is_valid(tbe)) {
        testAndRead(addr, tbe.DataBlk, pkt);
      } else {
        error("Missing data block");
      }
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, cache_entry.DataBlk, pkt);
      return num_functional_writes;
    }

    TBE tbe := TBEs[addr];
    num_functional_writes := num_functional_writes +
      testAndWrite(addr, tbe.DataBlk, pkt);
    return num_functional_writes;
  }

  Entry getL2CacheEntry(Addr address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Addr address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Addr address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
    assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
    assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }
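
  // A valid TBE takes precedence over the cache entry above: while a
  // request is in flight, the transient TBE state, not the (possibly
  // stale) cache state, determines the block's access permission.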

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if ((type == RubyRequestType:FLUSH)) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  MachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return MachineType:L2Cache;
    }
    return MachineType:L1Cache;
  }
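
  // FromL2 is set when a block is copied down from the L2 (see
  // nb_copyFromTBEToL1 below). The first hit after the transfer is
  // therefore attributed to the L2 rather than the L1, and the flag is
  // cleared so subsequent hits count as ordinary L1 hits.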

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }
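
  // Together with the no_mig_atomic parameter, AtomicAccessed implements a
  // no-migration policy for atomics: a GETS to a block touched by an atomic
  // is classified as Other_GETS_No_Mig (see the forward port below), so the
  // owner answers with shared data instead of handing over an exclusive copy.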

  // ** OUT_PORTS **
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady(clockEdge())) {
      peek(responseToCache_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady(clockEdge())) {
      peek(forwardToCache_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if ((in_msg.Type == CoherenceRequestType:GETX) ||
            (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Addr l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Addr l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }
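
  // A worked example of the ack accounting above: with four L1Cache
  // machines, machineCount(MachineType:L1Cache) is 4, so the requester
  // waits for 4 messages in total -- one from each of the 3 peer caches
  // plus one from memory, matching the (n-1) + 1 noted in the comment.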

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, issue_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := curCycle();
      }
    }

    // One from each other cache (n-1) plus the memory (+1)
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
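
  // The Acks arithmetic above recurs in all the data-response actions that
  // follow. On a broadcast, each of the n-2 non-owner peers sends a plain
  // ACK worth 1 and the owner's data response is worth 2 -- presumably
  // covering both its own ack and the memory's, which stays silent when a
  // cache supplies the data -- so the total reaches
  // machineCount(MachineType:L1Cache). On a directed probe the owner is
  // the only responder, so its single message must carry the full count.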

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }

  action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                             machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
                             tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      L1Dcache.setMRU(cache_entry);
      sequencer.writeCallback(address, cache_entry.DataBlk, false,
                              testAndClearLocalHit(cache_entry));

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
                              tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true,
                            machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
                            tbe.ForwardRequestTime, tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue(clockEdge());
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := curCycle();
      }
    }
  }
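
  // Example of the accounting above, assuming three pending messages: a
  // response carrying Acks := 2 and SilentAcks := 1 first subtracts the
  // silent acks (exactly once, guarded by AppliedSilentAcks) and then the
  // explicit acks, leaving NumPendingMsgs at 0 so completion can trigger.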

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue(clockEdge());
  }

  action(ll_L2toL1Transfer, "ll", desc="Queue a trigger to complete the L2 to L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
      out_msg.addr := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.addr := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }
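
  // Completion is detected here rather than acted on directly: when the
  // last expected message arrives, a trigger is queued to this machine's
  // own triggerQueue, and the All_acks / All_acks_no_sharers event then
  // fires from the trigger in_port above.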

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;
  }

  action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
    ++L1Dcache.demand_hits;
  }

  action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;
  }

  action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
    ++L1Icache.demand_hits;
  }

  action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits") {
    ++L2cache.demand_hits;
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }
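
  // The TBE serves as a temporary buffer for the L1-to-L2 move above: the
  // data is copied into a freshly allocated TBE, the L1 block is freed, an
  // L2 block is allocated, and the data is copied back before the TBE is
  // released.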

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }
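
  // For each L2-to-L1 transfer the triggering request is stalled and the
  // block parked in a transient T state; ll_L2toL1Transfer queues a trigger
  // after l2_cache_hit_latency, and the Complete_L2_to_L1 event then moves
  // the block to the corresponding R state, where the stalled request is
  // replayed. The same pattern repeats for S, O, M, and MM below.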

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, IR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, SR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, OR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, MR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MMR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
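
  // The R-suffixed states reached above are the "locked and ready to
  // service the mandatory queue" variants of the base states; the first
  // replayed Load/Ifetch/Store then folds them back into I/S/O/M/MM.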
1468
1469 // Transitions from Idle
1470 transition({I,IR}, Load, IS) {
1471 ii_allocateL1DCacheBlock;
1472 i_allocateTBE;
1473 a_issueGETS;
1474 uu_profileL1DataMiss;
1475 uu_profileL2Miss;
1476 k_popMandatoryQueue;
1477 }
1478
1479 transition({I,IR}, Ifetch, IS) {
1480 jj_allocateL1ICacheBlock;
1481 i_allocateTBE;
1482 a_issueGETS;
1483 uu_profileL1InstMiss;
1484 uu_profileL2Miss;
1485 k_popMandatoryQueue;
1486 }
1487
1488 transition({I,IR}, Store, IM) {
1489 ii_allocateL1DCacheBlock;
1490 i_allocateTBE;
1491 b_issueGETX;
1492 uu_profileL1DataMiss;
1493 uu_profileL2Miss;
1494 k_popMandatoryQueue;
1495 }
1496
1497 transition({I, IR}, Flush_line, IM_F) {
1498 it_allocateTBE;
1499 bf_issueGETF;
1500 k_popMandatoryQueue;
1501 }
1502
1503 transition(I, L2_Replacement) {
1504 rr_deallocateL2CacheBlock;
1505 ka_wakeUpAllDependents;
1506 }
1507
1508 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1509 f_sendAck;
1510 l_popForwardQueue;
1511 }
1512
1513 // Transitions from Shared
1514 transition({S, SM, ISM}, Load) {
1515 h_load_hit;
1516 uu_profileL1DataHit;
1517 k_popMandatoryQueue;
1518 }
1519
1520 transition({S, SM, ISM}, Ifetch) {
1521 h_ifetch_hit;
1522 uu_profileL1InstHit;
1523 k_popMandatoryQueue;
1524 }
1525
1526 transition(SR, Load, S) {
1527 h_load_hit;
1528 uu_profileL1DataMiss;
1529 uu_profileL2Hit;
1530 k_popMandatoryQueue;
1531 ka_wakeUpAllDependents;
1532 }
1533
1534 transition(SR, Ifetch, S) {
1535 h_ifetch_hit;
1536 uu_profileL1InstMiss;
1537 uu_profileL2Hit;
1538 k_popMandatoryQueue;
1539 ka_wakeUpAllDependents;
1540 }
1541
1542 transition({S,SR}, Store, SM) {
1543 i_allocateTBE;
1544 b_issueGETX;
1545 uu_profileL1DataMiss;
1546 uu_profileL2Miss;
1547 k_popMandatoryQueue;
1548 }
1549
1550 transition({S, SR}, Flush_line, SM_F) {
1551 i_allocateTBE;
1552 bf_issueGETF;
1553 forward_eviction_to_cpu;
1554 gg_deallocateL1CacheBlock;
1555 k_popMandatoryQueue;
1556 }
1557
1558 transition(S, L2_Replacement, I) {
1559 forward_eviction_to_cpu;
1560 rr_deallocateL2CacheBlock;
1561 ka_wakeUpAllDependents;
1562 }
1563
1564 transition(S, {Other_GETX, Invalidate}, I) {
1565 f_sendAck;
1566 forward_eviction_to_cpu;
1567 l_popForwardQueue;
1568 }
1569
1570 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1571 ff_sendAckShared;
1572 l_popForwardQueue;
1573 }
1574
1575 // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
    h_ifetch_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition(OR, Load, O) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(OR, Ifetch, O) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({O, OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
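  // MM is dirty and locally modified, so stores hit without a transaction.
  // A plain external GETS migrates the block (exclusive data is handed off
  // and the local copy invalidated), while non-migratory and DMA reads
  // merely demote the owner to O.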
  transition({MM, M}, {Ifetch}) {
    h_ifetch_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition({MM, M}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MMR, Load, MM) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Ifetch, MM) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
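  // M is dirty but not locally modified; the first store promotes it to MM
  // without a transaction, and an external read demotes it to O with shared
  // data.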
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MR, Load, M) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Ifetch, M) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM
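  // A store miss is outstanding: acks are counted as they arrive, ordinary
  // data moves the line to ISM to wait for the rest, and exclusive data lets
  // the store complete immediately (MM_W) while the remaining acks drain.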

  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
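  // An upgrade from Shared is in flight. Incoming data is written with the
  // *Verify actions, which (as the names suggest) check the response against
  // the copy this cache already holds.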
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
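  // Data has arrived; only acks remain. Once no sharers are outstanding the
  // store is performed, the directory is unblocked, and the TBE is freed
  // (or, on the flush path, a PUTF is issued instead).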
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM
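  // The owner has an upgrade outstanding. Until all acks arrive it keeps
  // servicing forwarded reads, from the cache in OM and from the TBE in the
  // flush variant OM_F; a conflicting GETX takes the data and drops the
  // upgrade back to IM.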

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS
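  // A read miss is outstanding. Shared_Ack and Shared_Data record that
  // another sharer exists; ordinary or shared data settles in SS, while
  // exclusive data fills the line directly into M_W.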

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS
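  // Read data has arrived; the line waits only for the remaining acks before
  // unblocking the directory and settling in S.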

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W
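  // The store already completed when exclusive data arrived, so stores keep
  // hitting here while the last acks drain; All_acks_no_sharers settles the
  // line in MM (or issues a PUTF on the flush path).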

  transition(MM_W, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W
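  // Like MM_W, but the line was filled by exclusive data on a read miss; a
  // store during the wait promotes it to MM_W.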

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI
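  // A writeback (PUT) is in flight. Until the directory acknowledges it,
  // this node is still responsible for the data and serves forwarded
  // requests out of the TBE.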

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
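  // Ownership was handed off while the writeback was pending, so forwarded
  // requests get bare acks; both the directory's Writeback_Ack and
  // Writeback_Nack end the eviction in I.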
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

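  // Transitions from MM_F (flush in progress, exclusive data held in the
  // TBE): forwarded requests are served from the TBE, with migratory
  // requests dropping to IM_F and shared reads to OM_F.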
  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}