Ruby: Convert CacheRequestType to RubyRequestType
[gem5.git] src/mem/protocol/MOESI_hammer-cache.sm
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2009 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * AMD's contributions to the MOESI hammer protocol do not constitute an
 * endorsement of its similarity to any AMD products.
 *
 * Authors: Milo Martin
 *          Brad Beckmann
 */

machine(L1Cache, "AMD Hammer-like protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   CacheMemory * L2cacheMemory,
   int cache_response_latency = 10,
   int issue_latency = 2,
   int l2_cache_hit_latency = 10,
   bool no_mig_atomic = true
{

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="5", ordered="false";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="false";
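
  // Virtual network assignment (as declared above): requests leave on
  // vnet 2, forwarded requests arrive on vnet 3, responses travel on
  // vnet 4 in both directions, and unblocks leave on vnet 5. Keeping the
  // message classes on separate virtual networks is the usual Ruby way
  // of avoiding network-level protocol deadlock.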

  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have a valid copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    ISM, AccessPermission:Read_Only, "ISM", desc="Issued GetX, received valid data, waiting for all acks";
    M_W, AccessPermission:Read_Only, "M^W", desc="Issued GetS, received exclusive data";
    MM_W, AccessPermission:Read_Write, "MM^W", desc="Issued GetX, received exclusive data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SS, AccessPermission:Read_Only, "SS", desc="Issued GetS, received data, waiting for all acks";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Other_GETS or Other_GETX, waiting for ack";
    IT, AccessPermission:Busy, "IT", desc="Invalid block transferring to L1";
    ST, AccessPermission:Busy, "ST", desc="S block transferring to L1";
    OT, AccessPermission:Busy, "OT", desc="O block transferring to L1";
    MT, AccessPermission:Busy, "MT", desc="M block transferring to L1";
    MMT, AccessPermission:Busy, "MMT", desc="MM block transferring to L1";
  }
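
  // The *T states mark blocks in flight from the L2 to an L1. They are
  // Busy so that forwarded probes arriving mid-transfer stall (see the
  // {IT, ST, OT, MT, MMT} transitions below) until Complete_L2_to_L1.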

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L2_Replacement, desc="L2 Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    Trigger_L2_to_L1D, desc="Trigger L2 to L1-Data transfer";
    Trigger_L2_to_L1I, desc="Trigger L2 to L1-Instruction transfer";
    Complete_L2_to_L1, desc="L2 to L1 transfer completed";

    // Requests
    Other_GETX, desc="A GetX from another processor";
    Other_GETS, desc="A GetS from another processor";
    Merged_GETS, desc="A Merged GetS from another processor";
    Other_GETS_No_Mig, desc="A GetS from another processor to an atomically accessed block, do not migrate";
    NC_DMA_GETS, desc="Special GetS when only DMA exists";
    Invalidate, desc="Invalidate block";

    // Responses
    Ack, desc="Received an ack message";
    Shared_Ack, desc="Received an ack message, responder has a shared copy";
    Data, desc="Received a data message";
    Shared_Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, responder had an exclusive copy, they gave it to us";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";
    All_acks_no_sharers, desc="Received all acks and no other processor has a shared copy";
  }

  // TYPES

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    bool AtomicAccessed, default="false", desc="block was accessed by an atomic operation";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Sharers, desc="On a GetS, did we find any other sharers in the system";
    bool AppliedSilentAcks, default="false", desc="for full-bit dir, does the pending msg count reflect the silent acks";
    MachineID LastResponder, desc="last machine to send a response for this request";
    MachineID CurOwner, desc="current owner of the block, used for UnblockS responses";
    Time InitialRequestTime, default="0", desc="time the initial request was sent from the L1Cache";
    Time ForwardRequestTime, default="0", desc="time the dir forwarded the request";
    Time FirstResponseTime, default="0", desc="the time the first response was received";
  }
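
  // The three timestamps above ride along in request and response
  // messages and are handed to the sequencer callbacks (see
  // hx_external_load_hit and sx_external_store_hit below), so that miss
  // latency can be broken down into request, forward, and response
  // components.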

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Address a);

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    if(is_valid(L2cache_entry)) {
      return L2cache_entry;
    }

    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }
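
  // The lookup order above is not significant: this protocol keeps the
  // L2 exclusive of both L1s (setState asserts the pairwise exclusion
  // below), so at most one of the three lookups can return a valid entry.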

  Entry getL2CacheEntry(Address address), return_by_pointer="yes" {
    Entry L2cache_entry := static_cast(Entry, "pointer", L2cacheMemory.lookup(address));
    return L2cache_entry;
  }

  Entry getL1DCacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(address));
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Address address), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(address));
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  GenericMachineType getNondirectHitMachType(Address addr, MachineID sender) {
    if (machineIDToMachineType(sender) == MachineType:L1Cache) {
      //
      // NOTE direct local hits should not call this
      //
      return GenericMachineType:L1Cache_wCC;
    } else {
      return ConvertMachToGenericMach(machineIDToMachineType(sender));
    }
  }

  GenericMachineType testAndClearLocalHit(Entry cache_entry) {
    if (is_valid(cache_entry) && cache_entry.FromL2) {
      cache_entry.FromL2 := false;
      return GenericMachineType:L2Cache;
    } else {
      return GenericMachineType:L1Cache;
    }
  }
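
  // A hit on a block that just migrated up from the L2 is credited to
  // the L2 exactly once: FromL2 is cleared on first use, so subsequent
  // hits are reported as ordinary L1 hits.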

  bool IsAtomicAccessed(Entry cache_entry) {
    assert(is_valid(cache_entry));
    return cache_entry.AtomicAccessed;
  }

  MessageBuffer triggerQueue, ordered="false";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == TriggerType:L2_to_L1) {
          trigger(Event:Complete_L2_to_L1, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:ALL_ACKS_NO_SHARERS) {
          trigger(Event:All_acks_no_sharers, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network

  // Response Network
  in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
          trigger(Event:Shared_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
          trigger(Event:Shared_Data, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Forward Network
  in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg, block_on="Address") {

        Entry cache_entry := getCacheEntry(in_msg.Address);
        TBE tbe := TBEs[in_msg.Address];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Other_GETX, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:MERGED_GETS) {
          trigger(Event:Merged_GETS, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (machineCount(MachineType:L1Cache) > 1) {
            if (is_valid(cache_entry)) {
              if (IsAtomicAccessed(cache_entry) && no_mig_atomic) {
                trigger(Event:Other_GETS_No_Mig, in_msg.Address, cache_entry, tbe);
              } else {
                trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
              }
            } else {
              trigger(Event:Other_GETS, in_msg.Address, cache_entry, tbe);
            }
          } else {
            trigger(Event:NC_DMA_GETS, in_msg.Address, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Invalidate, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Mandatory Queue
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...", rank=0) {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
        TBE tbe := TBEs[in_msg.LineAddress];

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry, tbe);
          } else {
            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Dcache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1

              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1I, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Icache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1i_victim_addr := L1IcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1i_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1i_victim_addr,
                        getL1ICacheEntry(l1i_victim_addr),
                        TBEs[l1i_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1i_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 fetches the line.
            // We know it can't be in the L2 due to exclusion.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry, tbe);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, try to write it to the L2
              if (L2cacheMemory.cacheAvail(in_msg.LineAddress)) {
                trigger(Event:L1_to_L2, in_msg.LineAddress, L1Icache_entry, tbe);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(in_msg.LineAddress);
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }

            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1
              Entry L2cache_entry := getL2CacheEntry(in_msg.LineAddress);
              if (is_valid(L2cache_entry)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:Trigger_L2_to_L1D, in_msg.LineAddress,
                        L2cache_entry, tbe);
              } else {
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type),
                        in_msg.LineAddress, L1Dcache_entry, tbe);
              }
            } else {
              // No room in the L1, so we need to make room
              Address l1d_victim_addr := L1DcacheMemory.cacheProbe(in_msg.LineAddress);
              if (L2cacheMemory.cacheAvail(l1d_victim_addr)) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2,
                        l1d_victim_addr,
                        getL1DCacheEntry(l1d_victim_addr),
                        TBEs[l1d_victim_addr]);
              } else {
                Address l2_victim_addr := L2cacheMemory.cacheProbe(l1d_victim_addr);
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement,
                        l2_victim_addr,
                        getL2CacheEntry(l2_victim_addr),
                        TBEs[l2_victim_addr]);
              }
            }
          }
        }
      }
    }
  }
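
  // Replacement cascade used above: on an L1 miss, a copy in the wrong
  // L1 is first pushed down to the L2 (L1_to_L2), evicting an L2 victim
  // (L2_Replacement) if the set is full. Only once an L1 way is free
  // does the access proceed, either pulling the block up from the L2
  // (Trigger_L2_to_L1I/D) or issuing the fetch to the directory.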

  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := get_time();
      tbe.NumPendingMsgs := machineCount(MachineType:L1Cache); // One from each other cache (n-1) plus the memory (+1)
    }
  }

  action(c_sendExclusiveData, "c", desc="Send exclusive data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(d_issuePUT, "d", desc="Issue PUT") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUT;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ee_sendDataShared, "\e", desc="Send data from cache to requestor, we remain the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(em_sendDataSharedMultiple, "em", desc="Send data from cache to all requestors, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        DPRINTF(RubySlicc, "%s\n", out_msg.DataBlk);
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(ff_sendAckShared, "\f", desc="Send shared ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 1;
        out_msg.SilentAcks := in_msg.SilentAcks;
        assert(in_msg.DirectedProbe == false);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gm_sendUnblockM, "gm", desc="Send unblock to memory and indicate M/O/E state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKM;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gs_sendUnblockS, "gs", desc="Send unblock to memory and indicate S state") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCKS;
      out_msg.Sender := machineID;
      out_msg.CurOwner := tbe.CurOwner;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer that the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, testAndClearLocalHit(cache_entry),
                           cache_entry.DataBlk);
  }

  action(hx_external_load_hit, "hx", desc="load required external msgs") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.readCallback(address,
                             getNondirectHitMachType(in_msg.Address, in_msg.Sender),
                             cache_entry.DataBlk,
                             tbe.InitialRequestTime,
                             tbe.ForwardRequestTime,
                             tbe.FirstResponseTime);
    }
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that the store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(mandatoryQueue_in, CacheMsg) {
      sequencer.writeCallback(address, testAndClearLocalHit(cache_entry),
                              cache_entry.DataBlk);

      cache_entry.Dirty := true;
      if (in_msg.Type == RubyRequestType:ATOMIC) {
        cache_entry.AtomicAccessed := true;
      }
    }
  }

  action(sx_external_store_hit, "sx", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    peek(responseToCache_in, ResponseMsg) {

      sequencer.writeCallback(address,
                              getNondirectHitMachType(address, in_msg.Sender),
                              cache_entry.DataBlk,
                              tbe.InitialRequestTime,
                              tbe.ForwardRequestTime,
                              tbe.FirstResponseTime);
    }
    cache_entry.Dirty := true;
  }

  action(sxt_trig_ext_store_hit, "sxt", desc="store required external msgs.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);

    sequencer.writeCallback(address,
                            getNondirectHitMachType(address, tbe.LastResponder),
                            cache_entry.DataBlk,
                            tbe.InitialRequestTime,
                            tbe.ForwardRequestTime,
                            tbe.FirstResponseTime);

    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
    tbe.Sharers := false;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    forwardToCache_in.dequeue();
  }

  action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
  }

  action(nb_copyFromTBEToL1, "fu", desc="Copy data from TBE to L1 cache entry.") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    cache_entry.Dirty := tbe.Dirty;
    cache_entry.DataBlk := tbe.DataBlk;
    cache_entry.FromL2 := true;
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      assert(in_msg.Acks > 0);
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "Sender = %s\n", in_msg.Sender);
      DPRINTF(RubySlicc, "SilentAcks = %d\n", in_msg.SilentAcks);
      if (tbe.AppliedSilentAcks == false) {
        tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.SilentAcks;
        tbe.AppliedSilentAcks := true;
      }
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
      DPRINTF(RubySlicc, "%d\n", tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(tbe.NumPendingMsgs);
      APPEND_TRANSITION_COMMENT(in_msg.Sender);
      tbe.LastResponder := in_msg.Sender;
      if (tbe.InitialRequestTime != zero_time() && in_msg.InitialRequestTime != zero_time()) {
        assert(tbe.InitialRequestTime == in_msg.InitialRequestTime);
      }
      if (in_msg.InitialRequestTime != zero_time()) {
        tbe.InitialRequestTime := in_msg.InitialRequestTime;
      }
      if (tbe.ForwardRequestTime != zero_time() && in_msg.ForwardRequestTime != zero_time()) {
        assert(tbe.ForwardRequestTime == in_msg.ForwardRequestTime);
      }
      if (in_msg.ForwardRequestTime != zero_time()) {
        tbe.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
      if (tbe.FirstResponseTime == zero_time()) {
        tbe.FirstResponseTime := get_time();
      }
    }
  }
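
  // Silent acks stand in for sharers the directory chose not to probe;
  // they are folded into the pending-message count at most once per
  // request (guarded by AppliedSilentAcks) before the explicit acks
  // carried by this message are subtracted.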

  action(uo_updateCurrentOwner, "uo", desc="When moving to SS state, update the current owner.") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.CurOwner := in_msg.Sender;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  }

  action(ll_L2toL1Transfer, "ll", desc="Schedule the L2-to-L1 transfer completion trigger") {
    enqueue(triggerQueue_out, TriggerMsg, latency=l2_cache_hit_latency) {
      out_msg.Address := address;
      out_msg.Type := TriggerType:L2_to_L1;
    }
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        if (tbe.Sharers) {
          out_msg.Type := TriggerType:ALL_ACKS;
        } else {
          out_msg.Type := TriggerType:ALL_ACKS_NO_SHARERS;
        }
      }
    }
  }

  action(p_decrementNumberOfMessagesByOne, "p", desc="Decrement the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs - 1;
  }

  action(pp_incrementNumberOfMessagesByOne, "\p", desc="Increment the number of messages for which we're waiting by one") {
    assert(is_valid(tbe));
    tbe.NumPendingMsgs := tbe.NumPendingMsgs + 1;
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(sq_sendSharedDataFromTBEToCache, "sq", desc="Send shared data from TBE to cache, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      assert(in_msg.Requestor != machineID);
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        if (in_msg.DirectedProbe) {
          out_msg.Acks := machineCount(MachineType:L1Cache);
        } else {
          out_msg.Acks := 2;
        }
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qm_sendDataFromTBEToCache, "qm", desc="Send data from TBE to cache, multiple sharers, still the owner") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination := in_msg.MergedRequestors;
        DPRINTF(RubySlicc, "%s\n", out_msg.Destination);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Acks := machineCount(MachineType:L1Cache);
        out_msg.SilentAcks := in_msg.SilentAcks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
      }
    }
  }

  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(r_setSharerBit, "r", desc="We saw other sharers") {
    assert(is_valid(tbe));
    tbe.Sharers := true;
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(t_sendExclusiveDataFromTBEToMemory, "t", desc="Send exclusive data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency=cache_response_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_DIRTY;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.Type := CoherenceResponseType:WB_EXCLUSIVE_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "Cached Data Block: %s, Msg Data Block: %s\n",
              cache_entry.DataBlk, in_msg.DataBlk);
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty || cache_entry.Dirty;
    }
  }

  action(gg_deallocateL1CacheBlock, "\g", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cacheMemory.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      if (L1IcacheMemory.isTagPresent(address)) {
        L1IcacheMemory.profileMiss(in_msg);
      } else if (L1DcacheMemory.isTagPresent(address)) {
        L1DcacheMemory.profileMiss(in_msg);
      }
      if (L2cacheMemory.isTagPresent(address) == false) {
        L2cacheMemory.profileMiss(in_msg);
      }
    }
  }

  action(zz_stallAndWaitMandatoryQueue, "\z", desc="Stall the head of the mandatory queue until the block is available again.") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

  action(ka_wakeUpAllDependents, "ka", desc="wake-up all dependents") {
    wakeUpAllBuffers();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************
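
  // Stalls in the transient states below use stall_and_wait rather than
  // busy recycling; the kd_wakeUpDependents / ka_wakeUpAllDependents
  // actions reschedule the parked requests once the block settles.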

  // Transitions for Load/Store/L2_Replacement from transient states
  transition({IM, SM, ISM, OM, IS, SS, OI, MI, II, IT, ST, OT, MT, MMT}, {Store, L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({M_W, MM_W}, {L2_Replacement}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, IS, OI, MI, II, IT, ST, OT, MT, MMT}, {Load, Ifetch}) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IM, SM, ISM, OM, IS, SS, MM_W, M_W, OI, MI, II, IT, ST, OT, MT, MMT}, L1_to_L2) {
    zz_stallAndWaitMandatoryQueue;
  }

  transition({IT, ST, OT, MT, MMT}, {Other_GETX, NC_DMA_GETS, Other_GETS, Merged_GETS, Other_GETS_No_Mig, Invalidate}) {
    // stall
  }

  // Transitions moving data between the L1 and L2 caches
  transition({I, S, O, M, MM}, L1_to_L2) {
    i_allocateTBE;
    gg_deallocateL1CacheBlock;
    vv_allocateL2CacheBlock;
    hp_copyFromTBEToL2;
    s_deallocateTBE;
    ka_wakeUpAllDependents;
  }

  transition(I, Trigger_L2_to_L1D, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1; // Not really needed for state I
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1D, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1D, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1D, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1D, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    ii_allocateL1DCacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(I, Trigger_L2_to_L1I, IT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(S, Trigger_L2_to_L1I, ST) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(O, Trigger_L2_to_L1I, OT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(M, Trigger_L2_to_L1I, MT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(MM, Trigger_L2_to_L1I, MMT) {
    i_allocateTBE;
    rr_deallocateL2CacheBlock;
    jj_allocateL1ICacheBlock;
    nb_copyFromTBEToL1;
    s_deallocateTBE;
    uu_profileMiss;
    zz_stallAndWaitMandatoryQueue;
    ll_L2toL1Transfer;
  }

  transition(IT, Complete_L2_to_L1, I) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(ST, Complete_L2_to_L1, S) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(OT, Complete_L2_to_L1, O) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MT, Complete_L2_to_L1, M) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(MMT, Complete_L2_to_L1, MM) {
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(I, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM, ISM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(S, {Other_GETX, Invalidate}, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    p_decrementNumberOfMessagesByOne;
    uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L2_Replacement, OI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(O, {Other_GETX, Invalidate}, I) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(O, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Modified
  transition(MM, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(MM, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(MM, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, NC_DMA_GETS) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(MM, Other_GETS_No_Mig, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(MM, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from Dirty Exclusive
  transition(M, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L2_Replacement, MI) {
    i_allocateTBE;
    d_issuePUT;
    rr_deallocateL2CacheBlock;
    ka_wakeUpAllDependents;
  }

  transition(M, {Other_GETX, Invalidate}, I) {
    c_sendExclusiveData;
    l_popForwardQueue;
  }

  transition(M, {Other_GETS, Other_GETS_No_Mig}, O) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, NC_DMA_GETS) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(M, Merged_GETS, O) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Data, ISM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, Exclusive_Data, MM_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    sx_external_store_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SM
  transition(SM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ff_sendAckShared;
    l_popForwardQueue;
  }

  transition(SM, {Other_GETX, Invalidate}, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, ISM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from ISM
  transition(ISM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(ISM, All_acks_no_sharers, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OM

  transition(OM, {Other_GETX, Invalidate}, IM) {
    e_sendData;
    pp_incrementNumberOfMessagesByOne;
    l_popForwardQueue;
  }

  transition(OM, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}) {
    ee_sendDataShared;
    l_popForwardQueue;
  }

  transition(OM, Merged_GETS) {
    em_sendDataSharedMultiple;
    l_popForwardQueue;
  }

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, {All_acks, All_acks_no_sharers}, MM) {
    sxt_trig_ext_store_hit;
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IS

  transition(IS, {Other_GETX, NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Invalidate}) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IS, Data, SS) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Shared_Data, SS) {
    u_writeDataToCache;
    r_setSharerBit;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    hx_external_load_hit;
    uo_updateCurrentOwner;
    n_popResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from SS

  transition(SS, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, Shared_Ack) {
    m_decrementNumberOfMessages;
    r_setSharerBit;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SS, All_acks, S) {
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  transition(SS, All_acks_no_sharers, S) {
    // Note: The directory might still be the owner, so that is why we go to S
    gs_sendUnblockS;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from MM_W

  transition(MM_W, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(MM_W, All_acks_no_sharers, MM) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from M_W

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(M_W, All_acks_no_sharers, M) {
    gm_sendUnblockM;
    s_deallocateTBE;
    j_popTriggerQueue;
    kd_wakeUpDependents;
  }

  // Transitions from OI/MI

  transition({OI, MI}, {Other_GETX, Invalidate}, II) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig}, OI) {
    sq_sendSharedDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({OI, MI}, Merged_GETS, OI) {
    qm_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Writeback_Ack, I) {
    t_sendExclusiveDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(OI, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  // Transitions from II
  transition(II, {NC_DMA_GETS, Other_GETS, Other_GETS_No_Mig, Other_GETX, Invalidate}, II) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(II, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }

  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
    kd_wakeUpDependents;
  }
}
1668 }