/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   int cache_response_latency = 10,
   int issue_latency = 2,
   int l2_cache_hit_latency = 10,
   bool no_mig_atomic = true
{

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";

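  // NOTE: requests, forwards, responses, and unblocks travel on separate
  // virtual networks so that one message class backing up cannot block the
  // classes it depends on; this is the usual guard against protocol deadlock.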

  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor to a block that should not migrate";
    NC_DMA_GETS, desc="Special GetS when no other cache exists, so the requestor must be DMA";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was last accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system?";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();

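  // NOTE: the L2 is probed first, then the two L1s; the L1/L2 exclusion
  // property (asserted in setState below) guarantees a block resides in at
  // most one of the three caches, so at most one of these lookups can hit.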
  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

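  // NOTE: the three asserts below enforce pairwise exclusion: a tag may be
  // present in at most one of the L1I, L1D, and L2 at any time.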
  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

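  // NOTE: FromL2 is a one-shot flag; the first hit after an L2-to-L1 fill is
  // reported as an L2 hit for profiling, after which the block counts as
  // L1-resident.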
  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

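  // NOTE: in_ports are polled in rank order, highest first, so locally
  // generated trigger events (rank 3) take priority over network traffic,
  // and the mandatory queue (rank 0) is serviced last.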
  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
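  // NOTE: the logic below maintains L1/L2 exclusion. On an L1 tag miss it
  // first moves any copy found in the other L1 down to the L2, then either
  // pulls the block up from the L2 or, when the target L1 set is full,
  // pushes an L1 victim down (possibly forcing an L2 replacement) before
  // the demand request is issued.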
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == CacheRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

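  // NOTE: with a full-bit directory, caches that would otherwise send plain
  // acks can stay silent; the directory folds their count into the response
  // as SilentAcks, which is subtracted from the pending count exactly once
  // (guarded by AppliedSilentAcks below).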
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Queue a trigger to complete the L2 to L1 transfer") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was the same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the head of the mandatory queue and wait until this address is woken up.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wake_up_dependents(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

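  // NOTE: each transition lists the current state(s), the triggering
  // event(s), an optional next state, and the actions executed in order.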
  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    // stall
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
    ka_wakeUpAllDependents;
  }

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, I) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, S) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, O) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, M) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MM) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM, ISM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition(MM, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SM
  transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition(ISM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }
}