// src/mem/protocol/MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   CacheMemory * L2cache;
   Cycles cache_response_latency := 10;
   Cycles issue_latency := 2;
   Cycles l2_cache_hit_latency := 10;
   bool no_mig_atomic := "True";
   bool send_evictions;

   // NETWORK BUFFERS
   MessageBuffer * requestFromCache, network="To", virtual_network="2",
        vnet_type="request";
   MessageBuffer * responseFromCache, network="To", virtual_network="4",
        vnet_type="response";
   MessageBuffer * unblockFromCache, network="To", virtual_network="5",
        vnet_type="unblock";

   MessageBuffer * forwardToCache, network="From", virtual_network="3",
        vnet_type="forward";
   MessageBuffer * responseToCache, network="From", virtual_network="4",
        vnet_type="response";

   MessageBuffer * mandatoryQueue;

   MessageBuffer * triggerQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient states related to flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }
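
  // Naming convention (editorial summary, inferred from the desc strings and
  // the transitions below): plain MOESI names are stable states; an R suffix
  // (IR/SR/OR/MR/MMR) marks a block just promoted from the L2, locked until
  // the stalled mandatory request replays; a T suffix marks an L2-to-L1
  // transfer in flight; the _F states implement the line-flush flow.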

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor; do not migrate the block (it was recently accessed atomically)";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // STRUCTURE DEFINITIONS
  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";

    Cycles InitialRequestTime, default="Cycles(0)",
        desc="time the initial request was sent from the L1Cache";
    Cycles ForwardRequestTime, default="Cycles(0)",
        desc="time the dir forwarded the request";
    Cycles FirstResponseTime, default="Cycles(0)",
        desc="the time the first response was received";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
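
  // Editorial note: a TBE (transaction buffer entry) plays the role of an
  // MSHR here. One is allocated per outstanding miss, writeback, or local
  // L1/L2 block move, and it carries the data, the dirty bit, and the count
  // of responses still owed (NumPendingMsgs) while the block is transient.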

  Tick clockEdge();
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();

  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    if (is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    if (is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }
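
  // The three lookups above cannot alias: the hierarchy is exclusive, so a
  // block lives in at most one of the L1I, L1D, or L2 at a time (the asserts
  // in setState below enforce exactly this invariant).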

  void functionalRead(Addr addr, Packet *pkt) {
    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      testAndRead(addr, cache_entry.DataBlk, pkt);
    } else {
      TBE tbe := TBEs[addr];
      if (is_valid(tbe)) {
        testAndRead(addr, tbe.DataBlk, pkt);
      } else {
        error("Missing data block");
      }
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      num_functional_writes := num_functional_writes +
          testAndWrite(addr, cache_entry.DataBlk, pkt);
      return num_functional_writes;
    }

    TBE tbe := TBEs[addr];
    num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
    return num_functional_writes;
  }

  Entry getL2CacheEntry(Addr address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Addr address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Addr address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
    assert((L1Icache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);
    assert((L1Dcache.isTagPresent(addr) && L2cache.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if (type == RubyRequestType:FLUSH) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  MachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return MachineType:L2Cache;
    }
    return MachineType:L1Cache;
  }
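
  // FromL2 is set when a block is promoted from the L2 (see
  // nb_copyFromTBEToL1 below); the first hit after the promotion is
  // therefore reported to the sequencer as an L2 hit rather than an L1 hit,
  // and the flag is cleared so later hits are charged to the L1.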

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  // ** OUT_PORTS **
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady(clockEdge())) {
      peek(responseToCache_in, ResponseMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady(clockEdge())) {
      peek(forwardToCache_in, RequestMsg, block_on="addr") {

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if ((in_msg.Type == CoherenceRequestType:GETX) ||
            (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.addr, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.addr, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }
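
  // A forwarded GETF is folded into Other_GETX above: another core flushing
  // the line needs exclusive permission first, so from this cache's point of
  // view it behaves exactly like a store miss by another processor. The
  // no_mig_atomic check keeps a block that was just used by an atomic
  // operation from migrating away on a GETS, presumably so a lock or barrier
  // line is not bounced between caches mid-sequence.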

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Addr l1i_victim_addr := L1Icache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cache.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Addr l1d_victim_addr := L1Dcache.cacheProbe(in_msg.LineAddress);
              if (L2cache.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }
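
  // In summary, a mandatory request walks a fixed decision tree: hit in the
  // matching L1 -> service it; block in the other L1 -> demote it to the L2
  // (evicting an L2 victim first if needed); block only in the L2 -> trigger
  // an L2-to-L1 promotion; otherwise allocate in the L1 (again making room
  // in the L1/L2 as required) and issue the miss to the directory.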

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, issue_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := curCycle();
      }
    }

    // One from each other cache (n-1) plus the memory (+1)
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();

      // One from each other cache (n-1) plus the memory (+1)
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache);
    }
  }
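
  // Ack accounting: every broadcast request expects machineCount(L1Cache)
  // responses in total, one from each of the other n-1 caches plus one on
  // behalf of memory; e.g. with four L1 caches a GETS waits for 4 responses,
  // 3 peer acks/data plus 1 from memory. Responders that cannot send an
  // explicit message are covered by data responses carrying Acks > 1 (the
  // DirectedProbe cases below) or by SilentAcks from a full-bit directory.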

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
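
  // When the directory already knows the owner it sends a directed probe to
  // that one cache, and the single data response above stands in for all
  // machineCount(L1Cache) acknowledgements. On a broadcast probe the data
  // response counts as two acks, which appears to cover both the owner's ack
  // and the memory response the requestor would otherwise wait for.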

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, we remain the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }

  action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false,
                           testAndClearLocalHit(cache_entry));
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
          machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
          tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      L1Dcache.setMRU(cache_entry);
      sequencer.writeCallback(address, cache_entry.DataBlk, false,
                              testAndClearLocalHit(cache_entry));

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, tbe.DataBlk, false, MachineType:L1Cache);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {
      L1Icache.setMRU(address);
      L1Dcache.setMRU(address);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
          machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
          tbe.ForwardRequestTime, tbe.FirstResponseTime);
    }
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true,
        machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
        tbe.ForwardRequestTime, tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue(clockEdge());
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := curCycle();
      }
    }
  }
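
  // With a full-bit directory, nodes that provably hold no copy never send an
  // explicit ack; the directory instead reports their number in SilentAcks.
  // That count is folded into NumPendingMsgs exactly once per request,
  // guarded by the AppliedSilentAcks flag above.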

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue(clockEdge());
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule the delayed completion of an L2-to-L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, l2_cache_hit_latency) {
      out_msg.addr := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.addr := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }
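
  // Completion is signalled indirectly: rather than finishing the transaction
  // inline, this action posts an ALL_ACKS / ALL_ACKS_NO_SHARERS message to
  // this controller's own trigger queue, and the corresponding All_acks*
  // transition fires on a later cycle. The no-sharers variant tells the
  // requester that no other cache kept a copy.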

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  action(gr_deallocateCacheBlock, "\gr", desc="Deallocate an L1 or L2 cache block.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else if (L1Icache.isTagPresent(address)) {
      L1Icache.deallocate(address);
    } else {
      assert(L2cache.isTagPresent(address));
      L2cache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;
  }

  action(uu_profileL1DataHit, "\udh", desc="Profile the demand hits") {
    ++L1Dcache.demand_hits;
  }

  action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;
  }

  action(uu_profileL1InstHit, "\uih", desc="Profile the demand hits") {
    ++L1Icache.demand_hits;
  }

  action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  action(uu_profileL2Hit, "\uh", desc="Profile the demand hits") {
    ++L2cache.demand_hits;
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the head of the mandatory queue and wait for this address to wake up.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }
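
  // Two flavors of stalling: zz_stallAndWaitMandatoryQueue parks the request
  // in a per-address wait buffer and relies on a later kd_wakeUpDependents /
  // ka_wakeUpAllDependents to replay it, while z_stall simply leaves the
  // message at the head of the port so it is retried every cycle.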

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
  transition({S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
  }
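
  // An L1-to-L2 demotion is a local three-step swap: stash the block in a
  // TBE, free the L1 entry, then reallocate it in the L2 and copy the data
  // back. The TBE exists only for the duration of the move, which preserves
  // the exclusive-hierarchy invariant without any network traffic.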

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(ST, Complete_L2_to_L1, SR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, OR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, MR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MMR) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
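
  // The promotion path mirrors the demotion above, but the block is unusable
  // for l2_cache_hit_latency cycles: the *T states bounce or stall all
  // traffic until the self-posted L2_to_L1 trigger arrives, and the resulting
  // *R ("locked") states exist only to replay the stalled mandatory request,
  // as the SR/OR/MR/MMR Load/Ifetch/Store transitions below show.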
1456
1457 // Transitions from Idle
1458 transition({I,IR}, Load, IS) {
1459 ii_allocateL1DCacheBlock;
1460 i_allocateTBE;
1461 a_issueGETS;
1462 uu_profileL1DataMiss;
1463 uu_profileL2Miss;
1464 k_popMandatoryQueue;
1465 }
1466
1467 transition({I,IR}, Ifetch, IS) {
1468 jj_allocateL1ICacheBlock;
1469 i_allocateTBE;
1470 a_issueGETS;
1471 uu_profileL1InstMiss;
1472 uu_profileL2Miss;
1473 k_popMandatoryQueue;
1474 }
1475
1476 transition({I,IR}, Store, IM) {
1477 ii_allocateL1DCacheBlock;
1478 i_allocateTBE;
1479 b_issueGETX;
1480 uu_profileL1DataMiss;
1481 uu_profileL2Miss;
1482 k_popMandatoryQueue;
1483 }
1484
1485 transition({I, IR}, Flush_line, IM_F) {
1486 it_allocateTBE;
1487 bf_issueGETF;
1488 k_popMandatoryQueue;
1489 }
1490
1491 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1492 f_sendAck;
1493 l_popForwardQueue;
1494 }
1495
1496 // Transitions from Shared
1497 transition({S, SM, ISM}, Load) {
1498 h_load_hit;
1499 uu_profileL1DataHit;
1500 k_popMandatoryQueue;
1501 }
1502
1503 transition({S, SM, ISM}, Ifetch) {
1504 h_ifetch_hit;
1505 uu_profileL1InstHit;
1506 k_popMandatoryQueue;
1507 }
1508
1509 transition(SR, Load, S) {
1510 h_load_hit;
1511 uu_profileL1DataMiss;
1512 uu_profileL2Hit;
1513 k_popMandatoryQueue;
1514 ka_wakeUpAllDependents;
1515 }
1516
1517 transition(SR, Ifetch, S) {
1518 h_ifetch_hit;
1519 uu_profileL1InstMiss;
1520 uu_profileL2Hit;
1521 k_popMandatoryQueue;
1522 ka_wakeUpAllDependents;
1523 }
1524
1525 transition({S,SR}, Store, SM) {
1526 i_allocateTBE;
1527 b_issueGETX;
1528 uu_profileL1DataMiss;
1529 uu_profileL2Miss;
1530 k_popMandatoryQueue;
1531 }
1532
1533 transition({S, SR}, Flush_line, SM_F) {
1534 i_allocateTBE;
1535 bf_issueGETF;
1536 forward_eviction_to_cpu;
1537 gg_deallocateL1CacheBlock;
1538 k_popMandatoryQueue;
1539 }
1540
1541 transition(S, L2_Replacement, I) {
1542 forward_eviction_to_cpu;
1543 rr_deallocateL2CacheBlock;
1544 ka_wakeUpAllDependents;
1545 }
1546
1547 transition(S, {Other_GETX, Invalidate}, I) {
1548 f_sendAck;
1549 forward_eviction_to_cpu;
1550 gr_deallocateCacheBlock;
1551 l_popForwardQueue;
1552 }
1553
1554 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1555 ff_sendAckShared;
1556 l_popForwardQueue;
1557 }
1558
1559 // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
    h_ifetch_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition(OR, Load, O) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(OR, Ifetch, O) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({O,OR}, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileL1DataMiss;
    uu_profileL2Miss;
    k_popMandatoryQueue;
  }

  transition({O, OR}, Flush_line, OM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    forward_eviction_to_cpu;
    gr_deallocateCacheBlock;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
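  // MM absorbs stores silently. Note the migratory-sharing optimization:
  // a plain Other_GETS from MM hands the block over exclusively
  // (c_sendExclusiveData, dropping to I), whereas non-migratory and DMA
  // reads share the data and downgrade to O instead.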
  transition({MM, M}, {Ifetch}) {
    h_ifetch_hit;
    uu_profileL1InstHit;
    k_popMandatoryQueue;
  }

  transition({MM, M}, {Load}) {
    h_load_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MMR, Load, MM) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Ifetch, MM) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MMR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition({MM, M, MMR, MR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    gr_deallocateCacheBlock;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    gr_deallocateCacheBlock;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
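  // M holds data that is dirty with respect to memory but has not yet
  // been written by this processor; the first local store promotes the
  // block to MM.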
  transition(M, Store, MM) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(MR, Load, M) {
    h_load_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Ifetch, M) {
    h_ifetch_hit;
    uu_profileL1InstMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(MR, Store, MM) {
    hh_store_hit;
    uu_profileL1DataMiss;
    uu_profileL2Hit;
    k_popMandatoryQueue;
    ka_wakeUpAllDependents;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    forward_eviction_to_cpu;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    forward_eviction_to_cpu;
    gr_deallocateCacheBlock;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM
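  // A store miss is in flight: each ack (and the data response)
  // decrements the expected message count, and o_checkForCompletion
  // fires the All_acks trigger when it reaches zero. Exclusive data
  // lets the store complete early, in MM_W.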
  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
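  // An upgrade is in flight: the stale shared copy can still answer
  // reads with shared acks, but losing the race to another writer drops
  // the block back to IM. Data responses are applied with the verifying
  // write action (v_writeDataToCacheVerify).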
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
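  // Data has arrived but acks are still outstanding; once the last one
  // lands with no sharers, the store completes and the cache unblocks
  // the directory in M (or issues the PUTF in the flush variant).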
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM
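  // The owner is upgrading and must keep supplying data to requests
  // ordered ahead of its own GETX. Handing the block to another writer
  // means a fresh data response is now expected, so the message count is
  // re-incremented before falling back to IM.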
  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS
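  // A read miss is in flight: Shared_Acks set sharer bits as they
  // arrive, plain Data lands the block in SS while acks drain, and
  // Exclusive_Data short-cuts to M_W because no other copies exist.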
  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS
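  // Data for the read miss has arrived; once every expected ack is in,
  // the cache sends a shared unblock to the directory and settles in S.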
  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: the directory may still be the owner, which is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W
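  // The store already completed against exclusive data; the remaining
  // acks are drained before the modified unblock is sent to the
  // directory.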
  transition(MM_W, Store) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W
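  // Exclusive data arrived for a read miss; a store during the ack
  // drain promotes the block to MM_W, otherwise it settles in M.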
  transition(M_W, Store, MM_W) {
    hh_store_hit;
    uu_profileL1DataHit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI
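  // A writeback is in flight with the data held in the TBE: forwarded
  // requests are serviced out of the TBE, and an Other_GETX strips the
  // remaining ownership down to II.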
  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
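  // The writeback has already lost all ownership; remaining forwards
  // are simply acked, and either a Writeback_Ack or a Writeback_Nack
  // from the directory retires the TBE.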
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

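  // Transitions from MM_F
  // A flush is in flight with the data held in the TBE: forwarded
  // requests are serviced out of the TBE until the line can be written
  // back.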
  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}