/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

35
36 machine(L1Cache, "AMD Hammer-like protocol")
37 : Sequencer * sequencer,
38 CacheMemory * L1IcacheMemory,
39 CacheMemory * L1DcacheMemory,
40 CacheMemory * L2cacheMemory,
41 int cache_response_latency = 10,
42 int issue_latency = 2,
43 int l2_cache_hit_latency = 10,
44 bool no_mig_atomic = true
45 {
46
  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";


  // STATES
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, desc="Idle";
    S, desc="Shared";
    O, desc="Owned";
    M, desc="Modified (dirty)";
    MM, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    ISM, "ISM", desc="Issued GetX, received data, waiting for all acks";
    M_W, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, "MM^W", desc="Issued GetX, received exclusive data";
    IS, "IS", desc="Issued GetS";
    SS, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, "OI", desc="Issued PutO, waiting for ack";
    MI, "MI", desc="Issued PutX, waiting for ack";
    II, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, "IT", desc="Invalid block transferring to L1";
    ST, "ST", desc="S block transferring to L1";
    OT, "OT", desc="O block transferring to L1";
    MT, "MT", desc="M block transferring to L1";
    MMT, "MMT", desc="MM block transferring to L1";
  }
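
  // Two-letter transient states read as "<previous state><request issued>":
  // e.g., IM was Idle and has issued a GetX; SM still holds a stale Shared
  // copy while its GetX is outstanding.  The *T states mark blocks in
  // flight from the L2 to an L1; such blocks cannot service external
  // requests until Complete_L2_to_L1 fires (see the stalling transition
  // below).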

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor for a block that should not migrate (atomically accessed)";
    NC_DMA_GETS, desc="special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  }
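
  // Events arrive from four places: the processor via the mandatory queue
  // (Load/Ifetch/Store), the forward network (Other_* requests, Invalidate,
  // and writeback acks/nacks), the response network (the Ack and Data
  // variants), and the local trigger queue (Complete_L2_to_L1 and the
  // All_acks* events).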

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }
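
  // The TBE is the per-block miss bookkeeping: NumPendingMsgs counts the
  // responses still outstanding before an All_acks* trigger can fire, and
  // the three timestamps are handed back to the sequencer on external
  // hits, presumably for miss-latency accounting.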

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }
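
  // Lookup order is L2, then L1D, then L1I.  The exclusion asserts in
  // setState() guarantee that at most one of the three arrays holds a
  // given block, so the order affects only lookup cost, not the result.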

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;

      // Set permission
      if ((state == State:MM) ||
          (state == State:MM_W)) {
        cache_entry.changePermission(AccessPermission:Read_Write);
      } else if (state == State:S ||
                 state == State:O ||
                 state == State:M ||
                 state == State:M_W ||
                 state == State:SM ||
                 state == State:ISM ||
                 state == State:OM ||
                 state == State:SS) {
        cache_entry.changePermission(AccessPermission:Read_Only);
      } else {
        cache_entry.changePermission(AccessPermission:Invalid);
      }
    }
  }

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **
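
  // The rank values below give service priority: internal triggers (rank 3)
  // and network responses (rank 2) are assumed to be handled before
  // forwarded requests (rank 1) and new processor requests (rank 0), so
  // in-flight transactions drain before new work is admitted.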

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                trigger(Event:L2_Replacement,
                        L2cacheMemory.cacheProbe(in_msg.LineAddress),
                        getL2CacheEntry(L2cacheMemory.cacheProbe(in_msg.LineAddress)),
                        TBEs[L2cacheMemory.cacheProbe(in_msg.LineAddress)]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        L1IcacheMemory.cacheProbe(in_msg.LineAddress),
                        getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
                        TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
                        getL2CacheEntry(L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress))),
                        TBEs[L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.LineAddress))]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                trigger(Event:L2_Replacement,
                        L2cacheMemory.cacheProbe(in_msg.LineAddress),
                        getL2CacheEntry(L2cacheMemory.cacheProbe(in_msg.LineAddress)),
                        TBEs[L2cacheMemory.cacheProbe(in_msg.LineAddress)]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.LineAddress))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        L1DcacheMemory.cacheProbe(in_msg.LineAddress),
                        getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
                        TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
              } else {
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
                        getL2CacheEntry(L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress))),
                        TBEs[L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.LineAddress))]);
              }
            }
          }
        }
      }
    }
  }
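
  // Example of the replacement dance above: a load that misses in the L1D
  // but hits in the L2 triggers Trigger_L2_to_L1D, which frees the L2
  // entry, allocates an L1D entry, and stalls the load until
  // Complete_L2_to_L1 replays it.  If the L1 set is full, the victim is
  // first pushed down via L1_to_L2, with an L2_Replacement beforehand if
  // the L2 set is full too.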

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }
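
  // Hammer keeps no sharer list, so every request is logically broadcast:
  // the requestor expects one response from each of the other caches (n-1)
  // plus one on behalf of the directory/memory, which is why
  // NumPendingMsgs starts at machineCount(MachineType:L1Cache).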

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }
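
  // Ack arithmetic in the data responses above (an inference from the
  // constants, not documented in this file): on a directed probe the lone
  // responder claims all machineCount() acks on behalf of the nodes that
  // were never probed, while on a broadcast the data response counts for
  // two (the responder plus the directory) and every other cache sends a
  // single one-ack response.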

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == CacheRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }
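
  // Silent acks ride in on responses and are subtracted exactly once per
  // transaction (guarded by AppliedSilentAcks); the timestamp checks above
  // assert that all responders report consistent issue and forward times
  // for the same request.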

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Enqueue trigger to complete the L2 to L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block.  Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wake_up_dependents(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wake_up_all_dependents();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    // stall
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
    ka_wakeUpAllDependents;
  }
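
  // The TBE doubles as a bounce buffer here: the block is copied out of
  // the L1 entry, the L1 entry is freed, an L2 entry is allocated, and the
  // data is copied back in, all within a single atomic transition.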

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, I) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, S) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, O) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, M) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MM) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }
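
  // The L2-to-L1 moves above are two-phase: Trigger_L2_to_L1* re-homes the
  // block, stalls the requesting instruction, and enqueues a trigger after
  // l2_cache_hit_latency; the matching Complete_L2_to_L1 then lands the
  // block in its base state and wakes the stalled request for replay.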

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM, ISM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition(MM, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SM
  transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition(ISM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }
}