src/mem/protocol/MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   int cache_response_latency = 10,
   int issue_latency = 2,
   int l2_cache_hit_latency = 10,
   bool no_mig_atomic = true
{
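
  // This controller manages a private L1I/L1D pair plus a private L2 and
  // implements a broadcast (Hammer-style) protocol: a miss goes to the
  // directory, every other cache is probed, and the requester completes
  // once it has collected a response from each other cache plus the memory
  // (see the NumPendingMsgs arithmetic in the issue actions below).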

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false", vnet_type="unblock";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false", vnet_type="response";
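  // Requests, forwarded probes, responses, and unblocks travel on separate
  // virtual networks so that one blocked message class cannot deadlock the
  // others.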


  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Base states, locked and ready to service the mandatory queue
    IR, AccessPermission:Invalid, desc="Idle";
    SR, AccessPermission:Read_Only, desc="Shared";
    OR, AccessPermission:Read_Only, desc="Owned";
    MR, AccessPermission:Read_Only, desc="Modified (dirty)";
    MMR, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";

    // Transient States Related to Flushing
    MI_F, AccessPermission:Busy, "MI_F", desc="Issued PutX due to a Flush, waiting for ack";
    MM_F, AccessPermission:Busy, "MM_F", desc="Issued GETF due to a Flush, waiting for ack";
    IM_F, AccessPermission:Busy, "IM_F", desc="Issued GetX due to a Flush";
    ISM_F, AccessPermission:Read_Only, "ISM_F", desc="Issued GetX, received data, waiting for all acks";
    SM_F, AccessPermission:Read_Only, "SM_F", desc="Issued GetX, we still have an old copy of the line";
    OM_F, AccessPermission:Read_Only, "OM_F", desc="Issued GetX, received data";
    MM_WF, AccessPermission:Busy, "MM_WF", desc="Issued GetX, received exclusive data";
  }
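
  // Naming convention: plain MOESI names are stable states; the *R variants
  // are the same states locked while a just-transferred block waits for the
  // stalled mandatory request to replay; the *T states cover an in-flight
  // L2-to-L1 transfer; and the *_F states track an in-progress line flush.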

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor; the block should not migrate";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";

    // For Flush
    Flush_line, desc="flush the cache line from all caches";
    Block_Ack, desc="the directory is blocked and ready for the flush";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system?";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }
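
  // The three timestamps travel with the request and its responses so the
  // sequencer can break miss latency into issue, directory-forwarding, and
  // first-response components (see hx_external_load_hit below).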

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);

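  // A block lives in at most one of the L1I, L1D, and L2 tag arrays at a
  // time (the asserts in setState enforce this exclusion), so at most one
  // of the lookups below can hit.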
  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else if ((type == RubyRequestType:FLUSH)) {
      return Event:Flush_line;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

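  // In_ports are ranked (mandatory queue lowest) so that internal triggers
  // and network responses are handled before forwarded requests and new
  // processor requests; in-flight transactions drain before fresh work is
  // admitted.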
  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if ((in_msg.Type == CoherenceRequestType:GETX) || (in_msg.Type == CoherenceRequestType:GETF)) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:BLOCK_ACK) {
          trigger(Event:Block_Ack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }
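
  // The mandatory-queue logic above keeps the hierarchy exclusive: a
  // request first tries its own L1; a copy found in the other L1 or a full
  // set is pushed down to the L2 (evicting an L2 victim if necessary); a
  // copy found in the L2 is pulled up with a Trigger_L2_to_L1* event; and
  // only when no level holds the block does the miss go out on the network.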

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETXIfMoreThanOne, "bo", desc="Issue GETX") {
    if (machineCount(MachineType:L1Cache) > 1) {
      enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := get_time();
      }
    }
    tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
  }

  action(bf_issueGETF, "bf", desc="Issue GETF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
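
  // Ack accounting for forwarded requests: a directed probe means this
  // cache is the only one probed, so its single response must carry the
  // acks of all caches (machineCount). On a broadcast, the data response
  // carries weight 2, one more than a plain ack, which appears to stand in
  // for the memory's share of the expected ack count.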

  action(ct_sendExclusiveDataFromTBE, "ct", desc="Send exclusive data from tbe to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(df_issuePUTF, "df", desc="Issue PUTF") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTF;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, while remaining the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(et_sendDataSharedFromTBE, "\et", desc="Send data from TBE to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(emt_sendDataSharedMultipleFromTBE, "emt", desc="Send data from tbe to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, RubyRequest) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(hh_flush_hit, "\hf", desc="Notify sequencer that flush completed.") {
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", tbe.DataBlk);
    sequencer.writeCallback(address, GenericMachineType:L1Cache, tbe.DataBlk);
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(it_allocateTBE, "it", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := false;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

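  // Each response carries an explicit Acks weight plus a SilentAcks count
  // (acks that nodes were allowed to skip under the full-bit directory);
  // the silent portion is subtracted exactly once per transaction, guarded
  // by tbe.AppliedSilentAcks.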
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks >= 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Queue an L2-to-L1 transfer trigger") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(uf_writeDataToCacheTBE, "uf", desc="Write data to TBE") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(vt_writeDataToTBEVerify, "vt", desc="Write data to TBE, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              tbe.DataBlk, in_msg.DataBlk);
      assert(tbe.DataBlk == in_msg.DataBlk);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.Dirty := in_msg.Dirty || tbe.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(z_stall, "z", desc="stall") {
    // do nothing and the special z_stall action will return a protocol stall
    // so that the next port is checked
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

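  // Transitions are grouped by the current base state. Requests that cannot
  // be handled in a transient state are either recycled with
  // zz_stallAndWaitMandatoryQueue (and woken later by the kd_/ka_ actions)
  // or protocol-stalled with z_stall.
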
  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IM_F, MM_WF, SM, SM_F, ISM, ISM_F, OM, OM_F, IS, SS, OI, MI, II}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement, Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT, MI_F, MM_F, OM_F, IM_F, ISM_F, SM_F, MM_WF}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT, IM_F, SM_F, ISM_F, OM_F, MM_WF, MI_F, MM_F, IR, SR, OR, MR, MMR}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MI_F, MM_F}, {Store}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({MM_F, MI_F}, {Flush_line}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate, Flush_line}) {
    z_stall;
  }

  transition({IR, SR, OR, MR, MMR}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    z_stall;
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
    ka_wakeUpAllDependents;
  }

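  // An L2-to-L1 transfer deallocates the L2 copy, re-allocates the block in
  // the proper L1, stages the data through a TBE, and parks in a *T state
  // until the local trigger fires; the stalled mandatory request then
  // replays against the locked-ready *R state.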
1291 transition(I, Trigger_L2_to_L1D, IT) {
1292 i_allocateTBE;
1293 rr_deallocateL2CacheBlock;
1294 ii_allocateL1DCacheBlock;
1295 nb_copyFromTBEToL1; // Not really needed for state I
1296 s_deallocateTBE;
1297 uu_profileMiss;
1298 zz_stallAndWaitMandatoryQueue;
1299 ll_L2toL1Transfer;
1300 }
1301
1302 transition(S, Trigger_L2_to_L1D, ST) {
1303 i_allocateTBE;
1304 rr_deallocateL2CacheBlock;
1305 ii_allocateL1DCacheBlock;
1306 nb_copyFromTBEToL1;
1307 s_deallocateTBE;
1308 uu_profileMiss;
1309 zz_stallAndWaitMandatoryQueue;
1310 ll_L2toL1Transfer;
1311 }
1312
1313 transition(O, Trigger_L2_to_L1D, OT) {
1314 i_allocateTBE;
1315 rr_deallocateL2CacheBlock;
1316 ii_allocateL1DCacheBlock;
1317 nb_copyFromTBEToL1;
1318 s_deallocateTBE;
1319 uu_profileMiss;
1320 zz_stallAndWaitMandatoryQueue;
1321 ll_L2toL1Transfer;
1322 }
1323
1324 transition(M, Trigger_L2_to_L1D, MT) {
1325 i_allocateTBE;
1326 rr_deallocateL2CacheBlock;
1327 ii_allocateL1DCacheBlock;
1328 nb_copyFromTBEToL1;
1329 s_deallocateTBE;
1330 uu_profileMiss;
1331 zz_stallAndWaitMandatoryQueue;
1332 ll_L2toL1Transfer;
1333 }
1334
1335 transition(MM, Trigger_L2_to_L1D, MMT) {
1336 i_allocateTBE;
1337 rr_deallocateL2CacheBlock;
1338 ii_allocateL1DCacheBlock;
1339 nb_copyFromTBEToL1;
1340 s_deallocateTBE;
1341 uu_profileMiss;
1342 zz_stallAndWaitMandatoryQueue;
1343 ll_L2toL1Transfer;
1344 }
1345
1346 transition(I, Trigger_L2_to_L1I, IT) {
1347 i_allocateTBE;
1348 rr_deallocateL2CacheBlock;
1349 jj_allocateL1ICacheBlock;
1350 nb_copyFromTBEToL1;
1351 s_deallocateTBE;
1352 uu_profileMiss;
1353 zz_stallAndWaitMandatoryQueue;
1354 ll_L2toL1Transfer;
1355 }
1356
1357 transition(S, Trigger_L2_to_L1I, ST) {
1358 i_allocateTBE;
1359 rr_deallocateL2CacheBlock;
1360 jj_allocateL1ICacheBlock;
1361 nb_copyFromTBEToL1;
1362 s_deallocateTBE;
1363 uu_profileMiss;
1364 zz_stallAndWaitMandatoryQueue;
1365 ll_L2toL1Transfer;
1366 }
1367
1368 transition(O, Trigger_L2_to_L1I, OT) {
1369 i_allocateTBE;
1370 rr_deallocateL2CacheBlock;
1371 jj_allocateL1ICacheBlock;
1372 nb_copyFromTBEToL1;
1373 s_deallocateTBE;
1374 uu_profileMiss;
1375 zz_stallAndWaitMandatoryQueue;
1376 ll_L2toL1Transfer;
1377 }
1378
1379 transition(M, Trigger_L2_to_L1I, MT) {
1380 i_allocateTBE;
1381 rr_deallocateL2CacheBlock;
1382 jj_allocateL1ICacheBlock;
1383 nb_copyFromTBEToL1;
1384 s_deallocateTBE;
1385 uu_profileMiss;
1386 zz_stallAndWaitMandatoryQueue;
1387 ll_L2toL1Transfer;
1388 }
1389
1390 transition(MM, Trigger_L2_to_L1I, MMT) {
1391 i_allocateTBE;
1392 rr_deallocateL2CacheBlock;
1393 jj_allocateL1ICacheBlock;
1394 nb_copyFromTBEToL1;
1395 s_deallocateTBE;
1396 uu_profileMiss;
1397 zz_stallAndWaitMandatoryQueue;
1398 ll_L2toL1Transfer;
1399 }
1400
1401 transition(IT, Complete_L2_to_L1, IR) {
1402 j_popTriggerQueue;
1403 kd_wakeUpDependents;
1404 }
1405
1406 transition(ST, Complete_L2_to_L1, SR) {
1407 j_popTriggerQueue;
1408 kd_wakeUpDependents;
1409 }
1410
1411 transition(OT, Complete_L2_to_L1, OR) {
1412 j_popTriggerQueue;
1413 kd_wakeUpDependents;
1414 }
1415
1416 transition(MT, Complete_L2_to_L1, MR) {
1417 j_popTriggerQueue;
1418 kd_wakeUpDependents;
1419 }
1420
1421 transition(MMT, Complete_L2_to_L1, MMR) {
1422 j_popTriggerQueue;
1423 kd_wakeUpDependents;
1424 }
1425
1426 // Transitions from Idle
1427 transition({I, IR}, Load, IS) {
1428 ii_allocateL1DCacheBlock;
1429 i_allocateTBE;
1430 a_issueGETS;
1431 uu_profileMiss;
1432 k_popMandatoryQueue;
1433 }
1434
1435 transition({I, IR}, Ifetch, IS) {
1436 jj_allocateL1ICacheBlock;
1437 i_allocateTBE;
1438 a_issueGETS;
1439 uu_profileMiss;
1440 k_popMandatoryQueue;
1441 }
1442
1443 transition({I, IR}, Store, IM) {
1444 ii_allocateL1DCacheBlock;
1445 i_allocateTBE;
1446 b_issueGETX;
1447 uu_profileMiss;
1448 k_popMandatoryQueue;
1449 }
1450
1451 transition({I, IR}, Flush_line, IM_F) {
1452 it_allocateTBE;
1453 bf_issueGETF;
1454 uu_profileMiss;
1455 k_popMandatoryQueue;
1456 }
1457
1458 transition(I, L2_Replacement) {
1459 rr_deallocateL2CacheBlock;
1460 ka_wakeUpAllDependents;
1461 }
1462
1463 transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
1464 f_sendAck;
1465 l_popForwardQueue;
1466 }
1467
1468 // Transitions from Shared
1469 transition({S, SM, ISM}, {Load, Ifetch}) {
1470 h_load_hit;
1471 k_popMandatoryQueue;
1472 }
1473
1474 transition(SR, {Load, Ifetch}, S) {
1475 h_load_hit;
1476 k_popMandatoryQueue;
1477 }
1478
1479 transition({S, SR}, Store, SM) {
1480 i_allocateTBE;
1481 b_issueGETX;
1482 uu_profileMiss;
1483 k_popMandatoryQueue;
1484 }
1485
1486 transition({S, SR}, Flush_line, SM_F) {
1487 i_allocateTBE;
1488 bf_issueGETF;
1489 uu_profileMiss;
1490 gg_deallocateL1CacheBlock;
1491 k_popMandatoryQueue;
1492 }
1493
1494 transition(S, L2_Replacement, I) {
1495 rr_deallocateL2CacheBlock;
1496 ka_wakeUpAllDependents;
1497 }
1498
1499 transition(S, {Other_GETX, Invalidate}, I) {
1500 f_sendAck;
1501 l_popForwardQueue;
1502 }
1503
1504 transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1505 ff_sendAckShared;
1506 l_popForwardQueue;
1507 }
1508
1509 // Transitions from Owned
1510 transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
1511 h_load_hit;
1512 k_popMandatoryQueue;
1513 }
1514
1515 transition(OR, {Load, Ifetch}, O) {
1516 h_load_hit;
1517 k_popMandatoryQueue;
1518 }
1519
1520 transition({O, OR}, Store, OM) {
1521 i_allocateTBE;
1522 b_issueGETX;
1523 p_decrementNumberOfMessagesByOne;
1524 uu_profileMiss;
1525 k_popMandatoryQueue;
1526 }
1527 transition({O, OR}, Flush_line, OM_F) {
1528 i_allocateTBE;
1529 bf_issueGETF;
1530 p_decrementNumberOfMessagesByOne;
1531 uu_profileMiss;
1532 gg_deallocateL1CacheBlock;
1533 k_popMandatoryQueue;
1534 }
1535
1536 transition(O, L2_Replacement, OI) {
1537 i_allocateTBE;
1538 d_issuePUT;
1539 rr_deallocateL2CacheBlock;
1540 ka_wakeUpAllDependents;
1541 }
1542
1543 transition(O, {Other_GETX, Invalidate}, I) {
1544 e_sendData;
1545 l_popForwardQueue;
1546 }
1547
1548 transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
1549 ee_sendDataShared;
1550 l_popForwardQueue;
1551 }
1552
1553 transition(O, Merged_GETS) {
1554 em_sendDataSharedMultiple;
1555 l_popForwardQueue;
1556 }
1557
1558 // Transitions from Modified
  transition({MM, MMR}, {Load, Ifetch}, MM) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({MM, MMR}, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition({MM, M, MMR}, Flush_line, MM_F) {
    i_allocateTBE;
    bf_issueGETF;
    p_decrementNumberOfMessagesByOne;
    gg_deallocateL1CacheBlock;
    k_popMandatoryQueue;
  }

  transition(MM_F, Block_Ack, MI_F) {
    df_issuePUTF;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
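  // M is dirty but not yet locally modified; the first store promotes it
  // to MM. Unlike MM, every flavor of forwarded GETS demotes M to O with a
  // shared data response, so a line in M never migrates on a read.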
  transition({M, MR}, {Load, Ifetch}, M) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({M, MR}, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

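  // In IM the store is still waiting for data and acks. Forwarded requests
  // are simply acked because this cache holds no valid copy yet;
  // Exclusive_Data completes the store at once (IM -> MM_W) while any
  // remaining acks drain.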
  transition({IM, IM_F}, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({IM, IM_F, MM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM_F, Data, ISM_F) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IM_F, Exclusive_Data, MM_WF) {
    uf_writeDataToCacheTBE;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
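  // SM still holds readable data while the upgrade is in flight. An
  // external GETX or Invalidate wins the race and knocks the request back
  // to IM; incoming data is written with a verify action so it can be
  // checked against the shared copy already in the cache.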
  transition({SM, SM_F}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM_F, {Other_GETX, Invalidate}, IM_F) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition({SM, SM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM_F, {Data, Exclusive_Data}, ISM_F) {
    vt_writeDataToTBEVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
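  // ISM already has the data and is only waiting for invalidation acks.
  // Once all acks arrive with no sharers, the store completes and the
  // directory is sent an exclusive unblock.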
  transition({ISM, ISM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ISM_F, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM

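  // OM keeps acting as owner, supplying shared data, while the acks for
  // its own GETX drain. Losing a race to an external GETX hands the data
  // away and falls back to IM, incrementing the expected message count,
  // apparently to cover the data response it must now wait for.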
  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM_F, {Other_GETX, Invalidate}, IM_F) {
    q_sendDataFromTBEToCache;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM_F, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(OM_F, Merged_GETS) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }

  transition({OM, OM_F}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition({MM_F, OM_F}, {All_acks, All_acks_no_sharers}, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS

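  // IS is a read miss in flight. Shared_Ack/Shared_Data set the sharer
  // bit so the requester learns that other copies exist; plain Data lands
  // in SS (still collecting acks), while Exclusive_Data means no other
  // copy exists and the line can go to M_W.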
  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS

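  // SS has the data but is still counting acks for the outstanding GETS.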
  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: the directory might still be the owner, which is why we go to
    // S rather than an exclusive state even though no sharers responded.
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

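  // In MM_W the store has already completed locally; the window exists
  // only to collect the last acks before settling in MM.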
  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition({MM_W, MM_WF}, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MM_WF, All_acks_no_sharers, MI_F) {
    df_issuePUTF;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

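  // A store during the M_W window upgrades to MM_W with no new coherence
  // traffic, since the line is already held exclusively.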
  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI

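  // In OI/MI a writeback (PUT) is in flight and the cache block is gone,
  // so racing forwarded requests are serviced out of the TBE until the
  // directory acknowledges the writeback.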
  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(MI_F, Writeback_Ack, I) {
    hh_flush_hit;
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
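  // In II the data has already been passed to another requester, so only
  // acks remain. A Writeback_Nack appears to mean the directory no longer
  // needs this cache's data, so the TBE is simply dropped.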
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

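  // Transitions from MM_F
  // A flush (GETF) is in flight with the data held in the TBE; racing
  // requests are serviced from the TBE, after which the flush proceeds
  // along the IM_F/OM_F paths above.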
  transition(MM_F, {Other_GETX, Invalidate}, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS, IM_F) {
    ct_sendExclusiveDataFromTBE;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(MM_F, NC_DMA_GETS, OM_F) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MM_F, Other_GETS_No_Mig, OM_F) {
    et_sendDataSharedFromTBE;
    l_popForwardQueue;
  }

  transition(MM_F, Merged_GETS, OM_F) {
    emt_sendDataSharedMultipleFromTBE;
    l_popForwardQueue;
  }
}