ruby: cleaning up RubyQueue and RubyNetwork dprintfs
[gem5.git] / src / mem / protocol / MOSI_SMP_directory_1level-cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id: MOSI_directory_1level-cache.sm 1.18 04/09/07 13:52:52-05:00 mikem@maya.cs.wisc.edu $
32 *
33 */
34
35 machine(L1Cache, "MOSI Directory Optimized") {
36
  // Virtual-network assignment for this L1 controller:
  //   vnet 0: requests sent to the directory
  //   vnet 1: requests the directory forwards to us (note: the only ordered buffer)
  //   vnet 2: data/ack responses, both outgoing and incoming
37   MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
38   MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
39 
40   MessageBuffer forwardedRequestToCache, network="From", virtual_network="1", ordered="true";
41   MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
42
43 // STATES
  // Cache controller states: 4 stable MOSI states plus transient states
  // named by <start state><target state>[<interleaved event>].
44   enumeration(State, desc="Cache states", default="L1Cache_State_I") {
45     // Base states
46     I, desc="Idle";
47     S, desc="Shared";
48     O, desc="Owned";
49     M, desc="Modified", format="!b";
50 
51     // Transient States
52     MI, desc="modified, issued PUTX, have not seen response yet";
53     OI, desc="owned, issued PUTX, have not seen response yet";
54 
55     IS, desc="idle, issued GETS, have not seen response yet";
56     ISI, desc="idle, issued GETS, saw INV, have not seen data for GETS yet", format="!b";
57 
58     IM, desc="idle, issued GETX, have not seen response yet";
59     IMI, desc="idle, issued GETX, saw forwarded GETX";
60     IMO, desc="idle, issued GETX, saw forwarded GETS";
61     IMOI, desc="idle, issued GETX, saw forwarded GETS, saw forwarded GETX";
62 
63     // Note: OM is a strange state, because it is waiting for the line
64     // to be stolen away, or look like it has been stolen away. The
65     // common case is that we see a forward from the directory that is
66     // really from us, we forwarded the data to our dataqueue, and
67     // everything works fine.
68 
69     OM, desc="owned, issued GETX, have not seen response yet";
70   }
71
72 // EVENTS
73 enumeration(Event, desc="Cache events") {
74 Load, desc="Load request from the processor";
75 Load_prefetch, desc="Load prefetch request from the processor";
76 Ifetch, desc="I-fetch request from the processor";
77 Store_prefetch, desc="Store prefetch request from the processor";
78 Store, desc="Store request from the processor";
79 Replacement, desc="Replacement", format="!r";
80
81 Forwarded_GETS, "Forwarded GETS", desc="Directory forwards GETS to us";
82 Forwarded_GETX, "Forwarded GETX", desc="Directory forwards GETX to us";
83 INV, "INV", desc="Invalidation", format="!r";
84
85 Proc_ack, "Proc ack", desc="Ack from proc";
86 Proc_last_ack, "Proc last ack", desc="Last ack", format="!r";
87
88 Data_ack_0, "Data ack 0", desc="Data with ack count = 0";
89 Data_ack_not_0, "Data ack not 0", desc="Data with ack count != 0 (but haven't seen all acks first";
90 Data_ack_not_0_last, "Data ack not 0 last", desc="Data with ack count != 0 after having received all acks";
91
92 Dir_WB_ack, "WB ack", desc="Writeback ack from dir";
93 }
94
95 // TYPES
96
97 // CacheEntry
  // One cache line as stored in the L1: coherence state plus the data block.
98   structure(Entry, desc="...", interface="AbstractCacheEntry") {
99     State CacheState, desc="cache state";
100     DataBlock DataBlk, desc="data for the block";
101   }
102
103 // TBE fields
  // Transaction Buffer Entry: per-address bookkeeping for an in-flight
  // miss or writeback. ForwardGetS_IDs/ForwardGetX_ID record requests the
  // directory forwarded to us before our own data arrived, so we can
  // satisfy them once it does.
104   structure(TBE, desc="...") {
105     Address Address, desc="Physical address for this TBE";
106     State TBEState, desc="Transient state";
107     DataBlock DataBlk, desc="Buffer for the data block";
108     int NumPendingAcks, desc="Number of acks that this processor is waiting for";
109     NetDest ForwardGetS_IDs, desc="Set of the processors to forward the block";
110     MachineID ForwardGetX_ID, desc="ID of the processor to forward the block";
111     int ForwardGetX_AckCount, desc="Number of acks the GetX we are forwarded needs";
112     bool isPrefetch, desc="Set if this was caused by a prefetch";
113   }
114
  // Interface to the C++ cache array implementation.
115   external_type(CacheMemory) {
116     bool cacheAvail(Address);
117     Address cacheProbe(Address);
118     void allocate(Address);
119     void deallocate(Address);
120     Entry lookup(Address);
121     void changePermission(Address, AccessPermission);
122     bool isTagPresent(Address);
123   }
124
  // Interface to the C++ TBE table implementation.
125   external_type(TBETable) {
126     TBE lookup(Address);
127     void allocate(Address);
128     void deallocate(Address);
129     bool isPresent(Address);
130   }
131
  // Per-controller objects: the two processor-side request queues (mandatory
  // = demand requests, optional = prefetches), the sequencer/store buffer
  // shared with the chip, the TBE table, and the L1 cache array itself.
132   MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
133   MessageBuffer optionalQueue, ordered="false", abstract_chip_ptr="true";
134   Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
135   StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";
136 
137   TBETable TBEs, template_hack="<L1Cache_TBE>";
138   CacheMemory cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_unified L1"', abstract_chip_ptr="true";
139
  // Current state for an address: an in-flight transaction's transient
  // state (TBE) takes precedence over the stable state in the cache tag;
  // untracked blocks are Idle.
140   State getState(Address addr) {
141     if(TBEs.isPresent(addr)) {
142       return TBEs[addr].TBEState;
143     } else if (cacheMemory.isTagPresent(addr)) {
144       return cacheMemory[addr].CacheState;
145     }
146     return State:I;
147   }
148
  // Record the new state in the TBE and/or cache entry (whichever exists)
  // and keep the cache's access permission in sync: writeback-pending
  // states (MI/OI) are treated as Invalid, all other transient states as
  // Busy.
149   void setState(Address addr, State state) {
150     if (TBEs.isPresent(addr)) {
151       TBEs[addr].TBEState := state;
152     }
153     if (cacheMemory.isTagPresent(addr)) {
154       cacheMemory[addr].CacheState := state;
155 
156       // Set permission
157       if ((state == State:I) || (state == State:MI) || (state == State:OI)) {
158         cacheMemory.changePermission(addr, AccessPermission:Invalid);
159       } else if (state == State:S || state == State:O) {
160         cacheMemory.changePermission(addr, AccessPermission:Read_Only);
161       } else if (state == State:M) {
162         cacheMemory.changePermission(addr, AccessPermission:Read_Write);
163       } else {
164         cacheMemory.changePermission(addr, AccessPermission:Busy);
165       }
166     }
167   }
168
169 // ** OUT_PORTS **
170
171   out_port(requestNetwork_out, RequestMsg, requestFromCache);   // GETS/GETX/PUTX to the directory
172   out_port(responseNetwork_out, ResponseMsg, responseFromCache); // data and acks to other caches
173
174 // ** IN_PORTS **
175
176 // Response Network
  // Response Network: classifies incoming DATA/ACK messages into events.
177   in_port(responseNetwork_in, ResponseMsg, responseToCache) {
178     if (responseNetwork_in.isReady()) {
179       peek(responseNetwork_in, ResponseMsg) {
180         if(in_msg.Type == CoherenceResponseType:DATA) {
181           if(in_msg.NumPendingAcks == 0) {
182             trigger(Event:Data_ack_0, in_msg.Address);
183           } else {
              // Acks can race ahead of the data: each early ack decremented
              // the TBE count (possibly below zero), so the sum of the
              // message's ack count and the TBE count is zero exactly when
              // every expected ack has already arrived.
184             if(in_msg.NumPendingAcks + TBEs[in_msg.Address].NumPendingAcks != 0) {
185               trigger(Event:Data_ack_not_0, in_msg.Address);
186             } else {
187               trigger(Event:Data_ack_not_0_last, in_msg.Address);
188             }
189           }
190         } else if(in_msg.Type == CoherenceResponseType:ACK) {
            // A count of exactly 1 means this ack is the last one we wait for.
191           if(TBEs[in_msg.Address].NumPendingAcks != 1){
192             trigger(Event:Proc_ack, in_msg.Address);
193           } else {
194             trigger(Event:Proc_last_ack, in_msg.Address);
195           }
196         }
197       }
198     }
199   }
200
201 // Forwarded Request network
  // Forwarded Request network: GETS/GETX forwarded by the directory,
  // invalidations, and writeback acks (arrives in order — vnet 1 is ordered).
202   in_port(forwardedRequestNetwork_in, RequestMsg, forwardedRequestToCache) {
203     if(forwardedRequestNetwork_in.isReady()) {
204       peek(forwardedRequestNetwork_in, RequestMsg) {
205         if(in_msg.Type == CoherenceRequestType:GETS) {
206           trigger(Event:Forwarded_GETS, in_msg.Address);
207         } else if (in_msg.Type == CoherenceRequestType:GETX) {
208           trigger(Event:Forwarded_GETX, in_msg.Address);
209         } else if (in_msg.Type == CoherenceRequestType:INV) {
210           trigger(Event:INV, in_msg.Address);
211         } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
212           trigger(Event:Dir_WB_ack, in_msg.Address);
213         } else {
214           error("Invalid forwarded request type");
215         }
216       }
217     }
218   }
219
220 // Mandatory Queue
  // Mandatory Queue: demand requests from the processor. If the set is
  // full, trigger a Replacement on the victim chosen by cacheProbe instead
  // of servicing the request.
221   in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
222     if (mandatoryQueue_in.isReady()) {
223       peek(mandatoryQueue_in, CacheMsg) {
224         if (cacheMemory.cacheAvail(in_msg.Address) == false) {
225           trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
226         } else {
227           if (in_msg.Type == CacheRequestType:LD) {
228             trigger(Event:Load, in_msg.Address);
229           } else if (in_msg.Type == CacheRequestType:IFETCH) {
230             trigger(Event:Ifetch, in_msg.Address);
231           } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
              // Atomics need exclusive access, so they follow the Store path.
232             trigger(Event:Store, in_msg.Address);
233           } else {
234             error("Invalid CacheRequestType");
235           }
236         }
237       }
238     }
239   }
240
241 // Optional Queue
  // Optional Queue: prefetch requests. Same structure as the mandatory
  // queue, but LD and IFETCH both map to Load_prefetch.
242   in_port(optionalQueue_in, CacheMsg, optionalQueue, desc="...") {
243     if (optionalQueue_in.isReady()) {
244       peek(optionalQueue_in, CacheMsg) {
245         if (cacheMemory.cacheAvail(in_msg.Address) == false) {
246           trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.Address));
247         } else {
248           if (in_msg.Type == CacheRequestType:LD) {
249             trigger(Event:Load_prefetch, in_msg.Address);
250           } else if (in_msg.Type == CacheRequestType:IFETCH) {
251             trigger(Event:Load_prefetch, in_msg.Address);
252           } else if ((in_msg.Type == CacheRequestType:ST) || (in_msg.Type == CacheRequestType:ATOMIC)) {
253             trigger(Event:Store_prefetch, in_msg.Address);
254           } else {
255             error("Invalid CacheRequestType");
256           }
257         }
258       }
259     }
260   }
261
262 // ACTIONS
263
  // Request-issue actions: send GETS/GETX/PUTX to the home directory.
264   action(a_issueGETS, "a", desc="Issue GETS") {
265     enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
266       out_msg.Address := address;
267       out_msg.Type := CoherenceRequestType:GETS;
268       out_msg.Requestor := machineID;
269       out_msg.Destination.add(map_Address_to_Directory(address));
270       out_msg.MessageSize := MessageSizeType:Control;
271     }
272   }
273 
274   action(b_issueGETX, "b", desc="Issue GETX") {
275     enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
276       out_msg.Address := address;
277       out_msg.Type := CoherenceRequestType:GETX;
278       out_msg.Requestor := machineID;
279       out_msg.Destination.add(map_Address_to_Directory(address));
280       out_msg.MessageSize := MessageSizeType:Control;
281     }
282   }
283 
  // PUTX carries the dirty data back to the directory, so it is a Data-size
  // message, unlike the control-size GETS/GETX above.
284   action(d_issuePUTX, "d", desc="Issue PUTX") {
285     enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
286       out_msg.Address := address;
287       out_msg.Type := CoherenceRequestType:PUTX;
288       out_msg.Requestor := machineID;
289       out_msg.Destination.add(map_Address_to_Directory(address));
290       out_msg.DataBlk := cacheMemory[address].DataBlk;
291       out_msg.MessageSize := MessageSizeType:Data;
292     }
293   }
294
  // Respond to a forwarded request with the cached copy of the block,
  // passing through the ack count the forwarded message carried.
295   action(e_dataFromCacheToRequestor, "e", desc="Send data from cache to requestor") {
296     peek(forwardedRequestNetwork_in, RequestMsg) {
297       enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
298         out_msg.Address := address;
299         out_msg.Type := CoherenceResponseType:DATA;
300         out_msg.Sender := machineID;
301         out_msg.SenderMachine := MachineType:L1Cache;
302         out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state O and we see a GetX
303         out_msg.Destination.add(in_msg.Requestor);
304         out_msg.DestMachine := MachineType:L1Cache;
305         DEBUG_EXPR(out_msg.Destination);
306         out_msg.DataBlk := cacheMemory[address].DataBlk;
307         out_msg.MessageSize := MessageSizeType:Data;
308       }
309     }
310   }
311 
  // Allocate a tag for the block if one is not already present.
312   action(g_allocateCacheBlock, "g", desc="Allocate cache block") {
313     if (cacheMemory.isTagPresent(address) == false) {
314       cacheMemory.allocate(address);
315     }
316   }
317
  // Hit notifications: call back into the sequencer unless the transaction
  // was started by a prefetch (isPrefetch set in the TBE).
318   action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
319     DEBUG_EXPR(cacheMemory[address].DataBlk);
320     if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
321       // Non-prefetch
322       sequencer.readCallback(address, cacheMemory[address].DataBlk);
323     } else {
324       // Prefetch - don't call back
325     }
326   }
327 
328   action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
329     DEBUG_EXPR(cacheMemory[address].DataBlk);
330     if((TBEs.isPresent(address) == false) || (TBEs[address].isPrefetch == false)) {
331       // Non-prefetch
332       sequencer.writeCallback(address, cacheMemory[address].DataBlk);
333     } else {
334       // Prefetch - don't call back
335     }
336   }
337
  // TBE allocation and the queue-pop actions (one per in_port).
338   action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
339     check_allocate(TBEs);
340     TBEs.allocate(address);
341     TBEs[address].NumPendingAcks := 0; // default value
342     TBEs[address].isPrefetch := false;
343     TBEs[address].ForwardGetS_IDs.clear();
344   }
345 
346   action(j_setPrefetchBit, "j", desc="Set prefetch bit") {
347     TBEs[address].isPrefetch := true;
348   }
349 
350   action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
351     mandatoryQueue_in.dequeue();
352   }
353 
354   action(l_popForwardedRequestQueue, "l", desc="Pop incoming forwarded request queue") {
355     forwardedRequestNetwork_in.dequeue();
356   }
357 
358   action(m_popOptionalQueue, "m", desc="Pop optional queue") {
359     optionalQueue_in.dequeue();
360   }
361 
362   action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
363     responseNetwork_in.dequeue();
364   }
365
  // Pending-ack accounting: p folds a data message's ack count into the
  // TBE; q subtracts one per received ack. Because acks may arrive before
  // the data, the running count can legitimately go negative.
366   action(p_addNumberOfPendingAcks, "p", desc="Add number of pending acks to TBE") {
367     peek(responseNetwork_in, ResponseMsg) {
368       DEBUG_EXPR(TBEs[address].NumPendingAcks);
369       TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks + in_msg.NumPendingAcks;
370       DEBUG_EXPR(in_msg.NumPendingAcks);
371       DEBUG_EXPR(TBEs[address].NumPendingAcks);
372     }
373   }
374 
375   action(q_decrementNumberOfPendingAcks, "q", desc="Decrement number of pending invalidations by one") {
376     DEBUG_EXPR(TBEs[address].NumPendingAcks);
377     TBEs[address].NumPendingAcks := TBEs[address].NumPendingAcks - 1;
378     DEBUG_EXPR(TBEs[address].NumPendingAcks);
379   }
380
381   action(s_deallocateTBE, "s", desc="Deallocate TBE") {
382     TBEs.deallocate(address);
383   }
384 
  // Acknowledge an invalidation back to its requestor (ack count 0 since
  // this message itself is the ack).
385   action(t_sendAckToInvalidator, "t", desc="Send ack to invalidator") {
386     peek(forwardedRequestNetwork_in, RequestMsg) {
387       enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
388         out_msg.Address := address;
389         out_msg.Type := CoherenceResponseType:ACK;
390         out_msg.Sender := machineID;
391         out_msg.SenderMachine := MachineType:L1Cache;
392         out_msg.Destination.add(in_msg.Requestor);
393         out_msg.DestMachine := MachineType:L1Cache;
394         DEBUG_EXPR(out_msg.Destination);
395         out_msg.NumPendingAcks := 0;
396         out_msg.MessageSize := MessageSizeType:Control;
397       }
398     }
399   }
400
401   action(u_writeDataToCache, "u", desc="Write data to cache") {
402     peek(responseNetwork_in, ResponseMsg) {
403       cacheMemory[address].DataBlk := in_msg.DataBlk;
404     }
405   }
406 
  // Save the block into the TBE before deallocating the cache entry on a
  // replacement, so forwarded requests can still be answered (see y below).
407   action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
408     TBEs[address].DataBlk := cacheMemory[address].DataBlk;
409   }
410 
  // Respond to a forwarded request from the TBE copy — used in MI/OI where
  // the cache entry has already been deallocated.
411   action(y_dataFromTBEToRequestor, "y", desc="Send data from TBE to requestor") {
412     peek(forwardedRequestNetwork_in, RequestMsg) {
413       enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
414         out_msg.Address := address;
415         out_msg.Type := CoherenceResponseType:DATA;
416         out_msg.Sender := machineID;
417         out_msg.SenderMachine := MachineType:L1Cache;
418         out_msg.NumPendingAcks := in_msg.NumPendingAcks; // Needed when in state MS and we see a GetX
419         out_msg.Destination.add(in_msg.Requestor);
420         out_msg.DestMachine := MachineType:L1Cache;
421         DEBUG_EXPR(out_msg.Destination);
422         out_msg.DataBlk := TBEs[address].DataBlk;
423         out_msg.MessageSize := MessageSizeType:Data;
424       }
425     }
426   }
427 
  // Deliberate no-op: leaves the triggering message queued so it is retried.
428   action(z_stall, "z", desc="Stall") {
429   }
430
  // Deferred-forwarding actions: while waiting for our own data (IM* states)
  // we record forwarded GETS/GETX requestors, then satisfy them once the
  // data arrives (ee/gg).
431   action(dd_recordGetSForwardID, "\d", desc="Record forwarded GetS for future forwarding") {
432     peek(forwardedRequestNetwork_in, RequestMsg) {
433       TBEs[address].ForwardGetS_IDs.add(in_msg.Requestor);
434     }
435   }
436 
437   action(ee_dataFromCacheToGetSForwardIDs, "\e", desc="Send data from cache to GetS ForwardIDs") {
438     // FIXME - In some cases this should be from the TBE, not the cache.
439     enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
440       out_msg.Address := address;
441       out_msg.Type := CoherenceResponseType:DATA;
442       out_msg.Sender := machineID;
443       out_msg.SenderMachine := MachineType:L1Cache;
444       out_msg.Destination := TBEs[address].ForwardGetS_IDs;
445       out_msg.DestMachine := MachineType:L1Cache;
446       DEBUG_EXPR(out_msg.Destination);
447       out_msg.DataBlk := cacheMemory[address].DataBlk;
448       out_msg.NumPendingAcks := 0;
449       out_msg.MessageSize := MessageSizeType:Data;
450     }
451   }
452 
453   action(ff_deallocateCacheBlock, "\f", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
454     cacheMemory.deallocate(address);
455   }
456 
457   action(gg_dataFromCacheToGetXForwardID, "\g", desc="Send data from cache to GetX ForwardID") {
458     // FIXME - In some cases this should be from the TBE, not the cache.
459     enqueue(responseNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
460       out_msg.Address := address;
461       out_msg.Type := CoherenceResponseType:DATA;
462       out_msg.Sender := machineID;
463       out_msg.SenderMachine := MachineType:L1Cache;
464       out_msg.Destination.add(TBEs[address].ForwardGetX_ID);
465       out_msg.DestMachine := MachineType:L1Cache;
466       DEBUG_EXPR(out_msg.Destination);
467       out_msg.DataBlk := cacheMemory[address].DataBlk;
468       out_msg.NumPendingAcks := TBEs[address].ForwardGetX_AckCount;
469       out_msg.MessageSize := MessageSizeType:Data;
470     }
471   }
472 
473   action(ii_recordGetXForwardID, "\i", desc="Record forwarded GetX and ack count for future forwarding") {
474     peek(forwardedRequestNetwork_in, RequestMsg) {
475       TBEs[address].ForwardGetX_ID := in_msg.Requestor;
476       TBEs[address].ForwardGetX_AckCount := in_msg.NumPendingAcks;
477     }
478   }
479
480 //*****************************************************
481 // TRANSITIONS
482 //*****************************************************
483
484   // Transitions for Load/Store/Prefetch/Replacement from transient states
  // Processor-side events are stalled (left queued) while a transaction is
  // already in flight for the block.
485   transition({OM, OI, IS, ISI, IM, IMO, IMOI, IMI, MI}, {Load, Load_prefetch, Ifetch, Store, Store_prefetch, Replacement}) {
486     z_stall;
487   }
488 
489   // Transitions from Idle
490   transition(I, {Load,Ifetch}, IS) {
491     g_allocateCacheBlock;
492     i_allocateTBE;
493     a_issueGETS;
494     k_popMandatoryQueue;
495   }
496 
497   transition(I, {Load_prefetch}, IS) {
498     g_allocateCacheBlock;
499     i_allocateTBE;
500     j_setPrefetchBit;
501     a_issueGETS;
502     m_popOptionalQueue;
503   }
504 
505   transition(I, Store, IM) {
506     g_allocateCacheBlock;
507     i_allocateTBE;
508     b_issueGETX;
509     k_popMandatoryQueue;
510   }
511 
512   transition(I, Store_prefetch, IM) {
513     g_allocateCacheBlock;
514     i_allocateTBE;
515     j_setPrefetchBit;
516     b_issueGETX;
517     m_popOptionalQueue;
518   }
519 
520   transition(I, Replacement) {
521     ff_deallocateCacheBlock;
522   }
523 
  // Even though we hold no copy, the invalidator still expects an ack.
524   transition(I, INV) {
525     t_sendAckToInvalidator;
526     l_popForwardedRequestQueue;
527   }
528
529   // Transitions from Shared
530   transition({S, O}, {Load,Ifetch}) {
531     h_load_hit;
532     k_popMandatoryQueue;
533   }
534 
  // A load prefetch to a block we already hold is simply dropped.
535   transition({S, O, M}, Load_prefetch) {
536     m_popOptionalQueue;
537   }
538 
539   transition(S, Store, IM) {
540     i_allocateTBE;
541     b_issueGETX;
542     k_popMandatoryQueue;
543   }
544 
545   transition(S, Store_prefetch, IM) {
546     i_allocateTBE;
547     j_setPrefetchBit;
548     b_issueGETX;
549     m_popOptionalQueue;
550   }
551 
  // Clean copy: silent drop, no writeback needed.
552   transition(S, Replacement, I) {
553     ff_deallocateCacheBlock;
554   }
555 
556   transition(S, INV, I) {
557     t_sendAckToInvalidator;
558     l_popForwardedRequestQueue;
559   }
560
561   // Transitions from Modified
562   transition(M, {Load, Ifetch}) {
563     h_load_hit;
564     k_popMandatoryQueue;
565   }
566 
567   transition(M, Store) {
568     hh_store_hit;
569     k_popMandatoryQueue;
570   }
571 
572   transition(M, Store_prefetch) {
573     m_popOptionalQueue;
574   }
575 
  // Dirty replacement: copy data into the TBE before deallocating so
  // forwarded requests can be answered while the PUTX is outstanding.
576   transition(M, Replacement, MI) {
577     i_allocateTBE;
578     d_issuePUTX;
579     x_copyDataFromCacheToTBE;
580     ff_deallocateCacheBlock;
581   }
582 
  // Supplying data to a reader keeps this cache as the owner (M -> O).
583   transition(M, Forwarded_GETS, O) {
584     e_dataFromCacheToRequestor;
585     l_popForwardedRequestQueue;
586   }
587 
588   transition(M, Forwarded_GETX, I) {
589     e_dataFromCacheToRequestor;
590     l_popForwardedRequestQueue;
591   }
592
593   // Transitions from O
  // Upgrading owner: must collect acks from sharers, hence OM.
594   transition(O, Store, OM) {
595     i_allocateTBE;
596     b_issueGETX;
597     k_popMandatoryQueue;
598   }
599 
600   transition(O, Store_prefetch, OM) {
601     i_allocateTBE;
602     j_setPrefetchBit;
603     b_issueGETX;
604     m_popOptionalQueue;
605   }
606 
607   transition(O, Replacement, OI){
608     i_allocateTBE;
609     d_issuePUTX;
610     x_copyDataFromCacheToTBE;
611     ff_deallocateCacheBlock;
612   }
613 
  // Owner keeps supplying readers; state is unchanged.
614   transition(O, Forwarded_GETS) {
615     e_dataFromCacheToRequestor;
616     l_popForwardedRequestQueue;
617   }
618 
619   transition(O, Forwarded_GETX, I) {
620     e_dataFromCacheToRequestor;
621     l_popForwardedRequestQueue;
622   }
623
624   // transitions from OI
  // PUTX in flight, cache entry gone: serve forwarded requests from the
  // TBE copy until the directory's writeback ack arrives.
625 
626   transition(OI, Forwarded_GETS) {
627     y_dataFromTBEToRequestor;
628     l_popForwardedRequestQueue;
629   }
630 
631   transition(OI, Forwarded_GETX) {
632     y_dataFromTBEToRequestor;
633     l_popForwardedRequestQueue;
634   }
635 
636   transition(OI, Dir_WB_ack, I) {
637     s_deallocateTBE;
638     l_popForwardedRequestQueue;
639   }
640
641   // Transitions from IS
642 
  // An INV during a pending GETS means our data (when it arrives) is
  // already stale — remember that via ISI and fall back to I afterwards.
643   transition(IS, INV, ISI) {
644     t_sendAckToInvalidator;
645     l_popForwardedRequestQueue;
646   }
647 
648   transition(IS, Data_ack_0, S) {
649     u_writeDataToCache;
650     h_load_hit;
651     s_deallocateTBE;
652     o_popIncomingResponseQueue;
653   }
654 
655   // transitions from ISI
656 
657   // in ISI, could get data from the Proc whose GETX caused INV to go from IS to ISI
658   // or, could get data from Dir if Dir's data lost race to Dir's INV
659   // or, could get data from Dir, if my GETS took forever to get to Dir, and the GETX
660   // processor already wrote it back
661 
  // The load is still satisfied once, but the block ends Invalid.
662   transition(ISI, Data_ack_0, I) {
663     u_writeDataToCache;
664     h_load_hit;
665     s_deallocateTBE;
666     o_popIncomingResponseQueue;
667   }
668 
669   transition(ISI, INV) {
670     t_sendAckToInvalidator;
671     l_popForwardedRequestQueue;
672   }
673
674   // Transitions from IM
675 
676   transition(IM, INV) { // do not need to go to IMI, since INV is for earlier epoch
677     t_sendAckToInvalidator;
678     l_popForwardedRequestQueue;
679   }
680 
  // Forwarded requests seen before our data arrives are recorded in the
  // TBE and satisfied later (IMO/IMI paths).
681   transition({IM, IMO}, Forwarded_GETS, IMO) {
682     dd_recordGetSForwardID;
683     l_popForwardedRequestQueue;
684   }
685 
686   transition(IM, Forwarded_GETX, IMI) {
687     ii_recordGetXForwardID;
688     l_popForwardedRequestQueue;
689   }
690 
691   transition(IM, {Data_ack_0, Data_ack_not_0_last}, M) {
692     u_writeDataToCache;
693     hh_store_hit;
694     s_deallocateTBE;
695     o_popIncomingResponseQueue;
696   }
697 
698   transition(IM, Data_ack_not_0) {
699     u_writeDataToCache;
700     p_addNumberOfPendingAcks;
701     o_popIncomingResponseQueue;
702   }
703 
704   transition(IM, Proc_ack) {
705     q_decrementNumberOfPendingAcks;
706     o_popIncomingResponseQueue;
707   }
708 
709   transition(IM, Proc_last_ack, M) {
710     hh_store_hit;
711     s_deallocateTBE;
712     o_popIncomingResponseQueue;
713   }
714
715   // transitions from IMO
  // Like IM, but one or more GETS were forwarded to us while waiting:
  // on completion, forward the data to the recorded sharers and end in O.
716 
717   transition(IMO, Forwarded_GETX, IMOI) {
718     ii_recordGetXForwardID;
719     l_popForwardedRequestQueue;
720   }
721 
722   transition(IMO, {Data_ack_0, Data_ack_not_0_last}, O) {
723     u_writeDataToCache;
724     hh_store_hit;
725     ee_dataFromCacheToGetSForwardIDs;
726     s_deallocateTBE;
727     o_popIncomingResponseQueue;
728   }
729 
730   transition(IMO, Data_ack_not_0) {
731     u_writeDataToCache;
732     p_addNumberOfPendingAcks;
733     o_popIncomingResponseQueue;
734   }
735 
736   transition(IMO, Proc_ack) {
737     q_decrementNumberOfPendingAcks;
738     o_popIncomingResponseQueue;
739   }
740 
741   transition(IMO, Proc_last_ack, O) {
742     hh_store_hit;
743     ee_dataFromCacheToGetSForwardIDs;
744     s_deallocateTBE;
745     o_popIncomingResponseQueue;
746   }
747
748   // transitions from IMI
  // A GETX was forwarded to us while waiting: on completion, hand the
  // block to the recorded exclusive requestor and end Invalid.
749 
750   transition(IMI, {Data_ack_0, Data_ack_not_0_last}, I) {
751     u_writeDataToCache;
752     hh_store_hit;
753     gg_dataFromCacheToGetXForwardID;
754     s_deallocateTBE;
755     o_popIncomingResponseQueue;
756   }
757 
758   transition(IMI, Data_ack_not_0) {
759     u_writeDataToCache;
760     p_addNumberOfPendingAcks;
761     o_popIncomingResponseQueue;
762   }
763 
764   transition(IMI, Proc_ack) {
765     q_decrementNumberOfPendingAcks;
766     o_popIncomingResponseQueue;
767   }
768 
769   transition(IMI, Proc_last_ack, I) {
770     hh_store_hit;
771     gg_dataFromCacheToGetXForwardID;
772     s_deallocateTBE;
773     o_popIncomingResponseQueue;
774   }
775
776   // transitions from IMOI
  // Both GETS sharers and a GETX requestor were forwarded while waiting:
  // satisfy the sharers first, then the exclusive requestor, end Invalid.
777 
778   transition(IMOI, {Data_ack_0, Data_ack_not_0_last}, I) {
779     u_writeDataToCache;
780     hh_store_hit;
781     ee_dataFromCacheToGetSForwardIDs;
782     gg_dataFromCacheToGetXForwardID;
783     s_deallocateTBE;
784     o_popIncomingResponseQueue;
785   }
786 
787   transition(IMOI, Data_ack_not_0) {
788     u_writeDataToCache;
789     p_addNumberOfPendingAcks;
790     o_popIncomingResponseQueue;
791   }
792 
793   transition(IMOI, Proc_ack) {
794     q_decrementNumberOfPendingAcks;
795     o_popIncomingResponseQueue;
796   }
797 
798   transition(IMOI, Proc_last_ack, I) {
799     hh_store_hit;
800     ee_dataFromCacheToGetSForwardIDs;
801     gg_dataFromCacheToGetXForwardID;
802     s_deallocateTBE;
803     o_popIncomingResponseQueue;
804   }
805
806   // Transitions from OM
807   transition(OM, Proc_ack) {
808     q_decrementNumberOfPendingAcks;
809     o_popIncomingResponseQueue;
810   }
811 
812   transition(OM, Forwarded_GETS) {
813     e_dataFromCacheToRequestor;
814     l_popForwardedRequestQueue;
815   }
816 
  // Our ownership was stolen by another GETX while upgrading; demote to IM
  // and wait for the data to come back to us.
817   transition(OM, Forwarded_GETX, IM) {
818     e_dataFromCacheToRequestor;
819     l_popForwardedRequestQueue;
820   }
821 
822   // Transitions from MI
  // Writeback in flight; forwarded requests are served from the TBE copy.
823 
824   transition(MI, Forwarded_GETS) {
825     y_dataFromTBEToRequestor;
826     l_popForwardedRequestQueue;
827   }
828 
829   transition(MI, Forwarded_GETX) {
830     y_dataFromTBEToRequestor;
831     l_popForwardedRequestQueue;
832   }
833 
834   transition(MI, Dir_WB_ack, I) {
835     s_deallocateTBE;
836     l_popForwardedRequestQueue;
837   }
837 }
838 }