/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */
machine(L1Cache, "Directory protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   int l2_select_num_bits,
   int request_latency = 2
{
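  // request_latency is the enqueue latency (in cycles) applied to every
  // message this controller sends; l2_select_num_bits sizes the L2
  // bank-select field taken from the address bits just above the block
  // offset (see l2_select_low_bit below). Both are typically set by the
  // Python configuration scripts.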

  // NODE L1 CACHE
  // From this node's L1 cache TO the network
  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
  // a local L1 -> this L2 bank
  MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
  // MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";


  // To this node's L1 cache FROM the network
  // an L2 bank -> this L1
  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
  // an L2 bank -> this L1
  MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";


  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
    MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  }
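
  // Note: stores hit only in MM/MM_W (Read_Write); a store in M first
  // transitions the line to MM. The _W ("locked") variants hold the block
  // during the use-timeout window after a fill (see Use_Timeout below).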

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L1_Replacement, desc="Replacement";

    // Requests
    Own_GETX, desc="We observe our own GetX forwarded back to us";
    Fwd_GETX, desc="A GetX from another processor";
    Fwd_GETS, desc="A GetS from another processor";
    Fwd_DMA, desc="A request from a DMA device";
    Inv, desc="Invalidations from the directory";

    // Responses
    Ack, desc="Received an ack message";
    Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received a data message, no other processor has a copy";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Ack_Data, desc="Writeback O.K. from directory, data requested";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";

    // Timeouts
    Use_Timeout, desc="lockout period ended";
  }
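
  // All_acks is generated locally through the trigger queue once the TBE's
  // NumPendingMsgs count reaches zero (see o_checkForCompletion).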

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
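
  // TBETable is implemented externally in C++ (external="yes"); TBEs[addr]
  // yields the in-flight transaction record for addr, if any.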

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";

  TBETable TBEs, template_hack="<L1Cache_TBE>";
  TimerTable useTimerTable;
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Entry getCacheEntry(Address addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
    if (is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
  }

  Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }
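
  // A valid TBE takes priority over the cache entry so transient states are
  // visible while a transaction is outstanding; anything untracked is I.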

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
           ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
           ((cache_entry.CacheState != State:S) && (state == State:S)) ||
           ((cache_entry.CacheState != State:O) && (state == State:O)) ) {

        cache_entry.CacheState := state;
        sequencer.checkCoherence(addr);
      }
      else {
        cache_entry.CacheState := state;
      }
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }
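
  // Atomics are folded into Event:Store since they need the same exclusive
  // (GETX) permission as an ordinary store.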

  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Use Timer
  in_port(useTimerTable_in, Address, useTimerTable) {
    if (useTimerTable_in.isReady()) {
      trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
              getCacheEntry(useTimerTable.readyAddress()),
              TBEs[useTimerTable.readyAddress()]);
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Request Network
  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg, block_on="Address") {
        assert(in_msg.Destination.isElement(machineID));
        DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);

        if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
            trigger(Event:Own_GETX, in_msg.Address,
                    getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
          } else {
            trigger(Event:Fwd_GETX, in_msg.Address,
                    getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:Fwd_DMA, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
          trigger(Event:Writeback_Ack_Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
    if (responseToL1Cache_in.isReady()) {
      peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network
  // Mandatory Queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1; replace it from there before re-fetching
              trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            }
            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1,
              // so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Icache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room
              trigger(Event:L1_Replacement,
                      L1IcacheMemory.cacheProbe(in_msg.LineAddress),
                      getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1; replace it from there before re-fetching
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }
            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it in the L1,
              // so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room
              trigger(Event:L1_Replacement,
                      L1DcacheMemory.cacheProbe(in_msg.LineAddress),
                      getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }
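
  // Example flow: a load that misses both L1s with no free way triggers
  // L1_Replacement on cacheProbe(LineAddress), the victim chosen by the
  // replacement policy; the original request stays blocked on its line
  // address and replays once the victim has been evicted.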


  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }
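
  // Bank-selection sketch: assuming 64-byte blocks (l2_select_low_bit = 6)
  // and l2_select_num_bits = 2, mapAddressToRange() picks the target L2
  // bank from address bits [7:6], striping consecutive blocks across four
  // banks.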

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTS;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address);
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L1\n");
      }
    }
  }

  action(e_sendDataToL2, "ee", desc="Send data from cache to the L2") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Acks := 0; // irrelevant
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }


  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    assert(is_valid(cache_entry));
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
    useTimerTable.unset(address);
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    requestNetwork_in.dequeue();
  }

  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }
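
  // Ack accounting: invalidation acks carry Acks = -1 (see f_sendAck),
  // while data (and the observed Own_GETX) carry the positive number of
  // acks to expect, so the "NumPendingMsgs - in_msg.Acks" updates above
  // return to zero exactly when everything expected has arrived (see
  // o_checkForCompletion).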

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToL1Cache_in.dequeue();
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

  action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
    useTimerTable.set(address, 50);
  }
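
  // The 50-cycle use timeout holds the block in M_W/MM_W long enough for
  // the requesting instruction to complete its access before the line can
  // be stolen, which matters for atomic (e.g. LL/SC-style) sequences.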


  action(ub_dmaUnblockL2Cache, "ub", desc="Send DMA ack to the L2 cache") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.Dirty := false;
        out_msg.Acks := 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache ||
          in_msg.RequestorMachine == MachineType:DMA) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }


  // L2 will usually request data for a writeback
  action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
      } else {
        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
      }
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;

      if (in_msg.Type == CoherenceResponseType:DATA) {
        //assert(in_msg.Dirty == false);
      }
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      // profile_miss(in_msg);
    }
  }

  action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
    requestNetwork_in.recycle();
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/L1_Replacement from transient states
  transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, L1_Replacement) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
    z_recycleRequestQueue;
  }

  transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }
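
  // Recycling (rather than stalling) keeps each queue moving for other
  // addresses: the blocked request rotates to the back of its queue and
  // retries until the transient state resolves.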

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L1_Replacement) {
    kk_deallocateL1CacheBlock;
  }

  transition(I, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, SI) {
    i_allocateTBE;
    dd_issuePUTS;
    kk_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(S, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(S, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L1_Replacement, OI) {
    i_allocateTBE;
    dd_issuePUTO;
    kk_deallocateL1CacheBlock;
  }

  transition(O, Fwd_GETX, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(O, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from MM
  transition({MM, MM_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({MM, MM_W}, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    kk_deallocateL1CacheBlock;
  }

  transition(MM, Fwd_GETX, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(MM, Fwd_DMA, MM) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from M
  transition({M, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    kk_deallocateL1CacheBlock;
  }

  transition(M, Fwd_GETX, I) {
    // e_sendData;
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(M, Fwd_GETS, O) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(M, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, {Exclusive_Data, Data}, OM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  // Transitions from SM
  transition(SM, Inv, IM) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, OM) {
    // v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(SM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from OM
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;
    l_popForwardQueue;
  }


  // transition(OM, Fwd_GETX, OMF) {
  transition(OM, Fwd_GETX, IM) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // transition({OM, OMF}, Ack) {
  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, All_acks, MM_W) {
    hh_store_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    o_scheduleUseTimeout;
    j_popTriggerQueue;
  }

  transition(MM_W, Use_Timeout, MM) {
    jj_unsetUseTimer;
  }

  // Transitions from IS

  transition(IS, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    gg_sendUnblockExclusive;
    o_scheduleUseTimeout;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M_W, Use_Timeout, M) {
    jj_unsetUseTimer;
  }

  // Transitions from OI/MI

  transition(MI, Fwd_GETS, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack_Data, I) {
    qq_sendWBDataFromTBEToL2; // always send data
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({MI, OI}, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel; we should handle this case differently.
    dd_issuePUTO;
    l_popForwardQueue;
  }

  // Transitions from II
  transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  // transition({II, SI}, Writeback_Nack, I) {
  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(SI, Writeback_Nack) {
    dd_issuePUTS;
    l_popForwardQueue;
  }

  transition(II, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SI, Inv, II) {
    f_sendAck;
    l_popForwardQueue;
  }
}