
/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L1Cache, "Directory protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   int l2_select_num_bits,
   int request_latency = 2
{

  // NODE L1 CACHE
  // From this node's L1 cache TO the network
  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false";
  // a local L1 -> this L2 bank
  MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false";
  // MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false";


  // To this node's L1 cache FROM the network
  // an L2 bank -> this L1
  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false";
  // an L2 bank -> this L1
  MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false";



  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
    MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  }
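
  // Transient state names encode the transaction in progress: the first
  // letter is the stable state the block came from, the second the state
  // being acquired (e.g. IM: was Invalid, GETX outstanding).  The _W suffix
  // marks the use-timeout window entered after exclusive data arrives, during
  // which forwarded requests are deferred so the local processor can complete
  // its access.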

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L1_Replacement, desc="Replacement";

    // Requests
    Own_GETX, desc="We observe our own GetX forwarded back to us";
    Fwd_GETX, desc="A GetX from another processor";
    Fwd_GETS, desc="A GetS from another processor";
    Fwd_DMA, desc="A DMA read forwarded to this cache";
    Inv, desc="Invalidations from the directory";

    // Responses
    Ack, desc="Received an ack message";
    Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received an exclusive data message";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Ack_Data, desc="Writeback O.K. from directory, data must be sent";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";

    // Timeouts
    Use_Timeout, desc="Lockout period ended";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
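
  // A TBE (transaction buffer entry) tracks each outstanding miss or
  // writeback for this controller.  It keeps its own copy of the data so a
  // writeback can proceed after the cache block itself has been deallocated,
  // and NumPendingMsgs counts the acks/data still expected before the
  // transaction can complete.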

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";

  TBETable TBEs, template_hack="<L1Cache_TBE>";
  TimerTable useTimerTable;
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Entry getCacheEntry(Address addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
  }

  Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
           ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
           ((cache_entry.CacheState != State:S) && (state == State:S)) ||
           ((cache_entry.CacheState != State:O) && (state == State:O)) ) {

        cache_entry.CacheState := state;
        sequencer.checkCoherence(addr);
      }
      else {
        cache_entry.CacheState := state;
      }
    }
  }
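
  // checkCoherence is only called when a block newly enters one of the
  // stable readable states (S, O, M, MM), so the sequencer can run its
  // coherence check; the assert above also enforces L1 I/D exclusion, i.e.
  // a block is never resident in both L1 caches at once.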

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

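  // Note: SLICC treats the declaration order of in_ports as significant; it
  // fixes the relative priority with which the generated controller polls
  // them when more than one has a message ready.  The ports below handle, in
  // turn, the use-timeout timer, internal triggers, forwarded requests from
  // the L2, responses, and finally the processor's mandatory queue.
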
  // Use Timer
  in_port(useTimerTable_in, Address, useTimerTable) {
    if (useTimerTable_in.isReady()) {
      trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
              getCacheEntry(useTimerTable.readyAddress()),
              TBEs[useTimerTable.readyAddress()]);
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Request Network
  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg, block_on="Address") {
        assert(in_msg.Destination.isElement(machineID));
        DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);

        if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
            trigger(Event:Own_GETX, in_msg.Address,
                    getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
          } else {
            trigger(Event:Fwd_GETX, in_msg.Address,
                    getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:Fwd_DMA, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
          trigger(Event:Writeback_Ack_Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
    if (responseToL1Cache_in.isReady()) {
      peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network
  // Mandatory queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, so replace it to maintain I/D exclusion
              trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            }
            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Icache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement,
                      L1IcacheMemory.cacheProbe(in_msg.LineAddress),
                      getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, so replace it to maintain I/D exclusion
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }
            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement,
                      L1DcacheMemory.cacheProbe(in_msg.LineAddress),
                      getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }


  // ACTIONS

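  // Requests and responses bound for the L2 are steered to the correct bank
  // with mapAddressToRange(), which selects a MachineID from
  // l2_select_num_bits address bits starting at l2_select_low_bit (by default
  // the first bit above the block offset), i.e. the L2 is address-interleaved
  // across its banks.
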
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTS;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address);
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L1\n");
      }
    }
  }

  action(e_sendDataToL2, "ee", desc="Send data from cache to L2") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Acks := 0; // irrelevant
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    assert(is_valid(cache_entry));
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
    useTimerTable.unset(address);
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    requestNetwork_in.dequeue();
  }

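  // Ack counting: NumPendingMsgs starts at zero.  The data response (or the
  // forwarded copy of our own GETX) carries the number of acks the requestor
  // must still collect and is subtracted here, while each invalidated sharer
  // replies with Acks = -1, which adds one back.  The transaction therefore
  // completes exactly when the counter returns to zero, regardless of the
  // order in which data and acks arrive.
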
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToL1Cache_in.dequeue();
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

  action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
    useTimerTable.set(address, 50);
  }
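
  // The use timeout holds a freshly filled block in M_W/MM_W for 50 cycles.
  // While the timer is pending, forwarded requests for the block are recycled
  // rather than serviced, giving the local processor a window in which to
  // actually use the data it just obtained.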


  action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.Dirty := false;
        out_msg.Acks := 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache ||
          in_msg.RequestorMachine == MachineType:DMA) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  // L2 will usually request data for a writeback
  action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
      } else {
        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
      }
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;

      if (in_msg.Type == CoherenceResponseType:DATA) {
        //assert(in_msg.Dirty == false);
      }
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block.  Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      // profile_miss(in_msg);
    }
  }

  action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
    requestNetwork_in.recycle();
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

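  // Each transition names (current state(s), triggering event(s)[, next state])
  // and lists the actions performed when it fires; when no next state is
  // given the block remains in its current state.
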
  // Transitions for Load/Store/L1_Replacement from transient states
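  // A request that cannot be handled in the current state is not dropped:
  // recycling moves the head message to the back of its queue so it is
  // retried later, effectively stalling it until the in-flight transaction
  // for that block completes.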
  transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, L1_Replacement) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
    z_recycleRequestQueue;
  }

  transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L1_Replacement) {
    kk_deallocateL1CacheBlock;
  }

  transition(I, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, SI) {
    i_allocateTBE;
    dd_issuePUTS;
    kk_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(S, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L1_Replacement, OI) {
    i_allocateTBE;
    dd_issuePUTO;
    kk_deallocateL1CacheBlock;
  }

  transition(O, Fwd_GETX, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(O, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from MM
  transition({MM, MM_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({MM, MM_W}, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    kk_deallocateL1CacheBlock;
  }

  transition(MM, Fwd_GETX, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(MM, Fwd_DMA, MM) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from M
  transition({M, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    kk_deallocateL1CacheBlock;
  }

  transition(M, Fwd_GETX, I) {
    // e_sendData;
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(M, Fwd_GETS, O) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(M, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, {Exclusive_Data, Data}, OM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition(SM, Inv, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, OM) {
    // v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(SM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from OM
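  // A cache in OM already holds data for the block and is waiting for acks.
  // The copy of its own GETX forwarded back by the L2 (Own_GETX) carries the
  // ack count to expect, which mm_decrementNumberOfMessages folds into
  // NumPendingMsgs.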
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;
    l_popForwardQueue;
  }


  // transition(OM, Fwd_GETX, OMF) {
  transition(OM, Fwd_GETX, IM) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // transition({OM, OMF}, Ack) {
  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

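  // All required acks (and data) have arrived: perform the waiting store,
  // send an exclusive unblock to the L2, and enter MM_W under the use timer.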
  transition(OM, All_acks, MM_W) {
    hh_store_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    o_scheduleUseTimeout;
    j_popTriggerQueue;
  }

  transition(MM_W, Use_Timeout, MM) {
    jj_unsetUseTimer;
  }

  // Transitions from IS

  transition(IS, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    gg_sendUnblockExclusive;
    o_scheduleUseTimeout;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M_W, Use_Timeout, M) {
    jj_unsetUseTimer;
  }

  // Transitions from OI/MI

  transition(MI, Fwd_GETS, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

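  // Writeback completion: the PUT was issued when the block left the cache,
  // so the data now lives only in the TBE.  The L2/directory replies with
  // Writeback_Ack (just unblock), Writeback_Ack_Data (send the data from the
  // TBE), or Writeback_Nack (the writeback was not accepted; typically the
  // PUT is reissued, see the FIXME below).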
  transition({SI, OI, MI}, Writeback_Ack_Data, I) {
    qq_sendWBDataFromTBEToL2;  // always send data
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({MI, OI}, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel, we should handle this case differently.
    dd_issuePUTO;
    l_popForwardQueue;
  }

  // Transitions from II
  transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  // transition({II, SI}, Writeback_Nack, I) {
  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(SI, Writeback_Nack) {
    dd_issuePUTS;
    l_popForwardQueue;
  }

  transition(II, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SI, Inv, II) {
    f_sendAck;
    l_popForwardQueue;
  }
}