src/mem/protocol/MOESI_CMP_directory-L1cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 *
33 */
34
35 machine(L1Cache, "Directory protocol")
36 : Sequencer * sequencer,
37 CacheMemory * L1IcacheMemory,
38 CacheMemory * L1DcacheMemory,
39 int l2_select_num_bits,
40 int request_latency = 2,
41 bool send_evictions
42 {
43
44 // NODE L1 CACHE
45 // From this node's L1 cache TO the network
46 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
47 MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
48 // a local L1 -> this L2 bank
49 MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
50 // MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";
51
52
53 // To this node's L1 cache FROM the network
54 // an L2 bank -> this L1
55 MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
56 // an L2 bank -> this L1
57 MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";
58
59
60
61 // STATES
62 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
63 // Base states
64 I, AccessPermission:Invalid, desc="Idle";
65 S, AccessPermission:Read_Only, desc="Shared";
66 O, AccessPermission:Read_Only, desc="Owned";
67 M, AccessPermission:Read_Only, desc="Modified (dirty)";
68 M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
69 MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
70 MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
71
72 // Transient States
73 IM, AccessPermission:Busy, "IM", desc="Issued GetX";
74 SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
75 OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
76 IS, AccessPermission:Busy, "IS", desc="Issued GetS";
77 SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
78 OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
79 MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
80 II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
81 }
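// Naming note: transient states read <current><target>, e.g. IM is a miss from
// I that is upgrading toward M. The *_W states are the post-fill "lockout"
// window: after exclusive data arrives, o_scheduleUseTimeout starts a timer and
// forwarded requests are recycled until Use_Timeout moves the line to M or MM,
// so the local access that triggered the miss can complete first.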
82
83 // EVENTS
84 enumeration(Event, desc="Cache events") {
85 Load, desc="Load request from the processor";
86 Ifetch, desc="I-fetch request from the processor";
87 Store, desc="Store request from the processor";
88 L1_Replacement, desc="Replacement";
89
90 // Requests
91 Own_GETX, desc="We observe our own GetX forwarded back to us";
92 Fwd_GETX, desc="A GetX from another processor";
93 Fwd_GETS, desc="A GetS from another processor";
94 Fwd_DMA, desc="A forwarded DMA read request";
95 Inv, desc="Invalidations from the directory";
96
97 // Responses
98 Ack, desc="Received an ack message";
99 Data, desc="Received a data message, responder has a shared copy";
100 Exclusive_Data, desc="Received an exclusive data message";
101
102 Writeback_Ack, desc="Writeback O.K. from directory";
103 Writeback_Ack_Data, desc="Writeback O.K. from directory, data must be sent";
104 Writeback_Nack, desc="Writeback not O.K. from directory";
105
106 // Triggers
107 All_acks, desc="Received all required data and message acks";
108
109 // Timeouts
110 Use_Timeout, desc="lockout period ended";
111 }
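// Own_GETX is this cache's own GETX arriving back on the forwarded-request
// network. Its Acks field is folded into the pending-message count in
// mm_decrementNumberOfMessages, which is how the requester learns how many
// invalidation acks it still has to collect.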
112
113 // TYPES
114
115 // CacheEntry
116 structure(Entry, desc="...", interface="AbstractCacheEntry") {
117 State CacheState, desc="cache state";
118 bool Dirty, desc="Is the data dirty (different than memory)?";
119 DataBlock DataBlk, desc="data for the block";
120 }
121
122 // TBE fields
123 structure(TBE, desc="...") {
124 Address Address, desc="Physical address for this TBE";
125 State TBEState, desc="Transient state";
126 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
127 bool Dirty, desc="Is the data dirty (different than memory)?";
128 int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
129 }
130
131 structure(TBETable, external="yes") {
132 TBE lookup(Address);
133 void allocate(Address);
134 void deallocate(Address);
135 bool isPresent(Address);
136 }
137
138 void set_cache_entry(AbstractCacheEntry b);
139 void unset_cache_entry();
140 void set_tbe(TBE b);
141 void unset_tbe();
142
143 MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
144
145 TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
146 TimerTable useTimerTable;
147 int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
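// Requests and responses are steered to an L2 bank with mapAddressToRange,
// which selects the bank from l2_select_num_bits address bits starting at
// l2_select_low_bit (by default the bits just above the block offset).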
148
149 Entry getCacheEntry(Address addr), return_by_pointer="yes" {
150 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
151 if(is_valid(L1Dcache_entry)) {
152 return L1Dcache_entry;
153 }
154
155 Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
156 return L1Icache_entry;
157 }
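// A block may live in the D-cache or the I-cache, but never in both at once
// (setState asserts this), so checking the D-cache first and then the I-cache
// is sufficient.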
158
159 Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
160 return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
161 }
162
163 Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
164 return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
165 }
166
167 State getState(TBE tbe, Entry cache_entry, Address addr) {
168 if(is_valid(tbe)) {
169 return tbe.TBEState;
170 } else if (is_valid(cache_entry)) {
171 return cache_entry.CacheState;
172 }
173 return State:I;
174 }
175
176 void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
177 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
178
179 if (is_valid(tbe)) {
180 tbe.TBEState := state;
181 }
182
183 if (is_valid(cache_entry)) {
184 if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
185 ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
186 ((cache_entry.CacheState != State:S) && (state == State:S)) ||
187 ((cache_entry.CacheState != State:O) && (state == State:O)) ) {
188
189 cache_entry.CacheState := state;
190 sequencer.checkCoherence(addr);
191 }
192 else {
193 cache_entry.CacheState := state;
194 }
195 }
196 }
197
198 AccessPermission getAccessPermission(Address addr) {
199 TBE tbe := TBEs[addr];
200 if(is_valid(tbe)) {
201 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
202 return L1Cache_State_to_permission(tbe.TBEState);
203 }
204
205 Entry cache_entry := getCacheEntry(addr);
206 if(is_valid(cache_entry)) {
207 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
208 return L1Cache_State_to_permission(cache_entry.CacheState);
209 }
210
211 DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
212 return AccessPermission:NotPresent;
213 }
214
215 void setAccessPermission(Entry cache_entry, Address addr, State state) {
216 if (is_valid(cache_entry)) {
217 cache_entry.changePermission(L1Cache_State_to_permission(state));
218 }
219 }
220
221 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
222 Entry cache_entry := getCacheEntry(addr);
223 if(is_valid(cache_entry)) {
224 return cache_entry.DataBlk;
225 }
226
227 TBE tbe := TBEs[addr];
228 if(is_valid(tbe)) {
229 return tbe.DataBlk;
230 }
231
232 error("Data block missing!");
233 }
234
235 Event mandatory_request_type_to_event(RubyRequestType type) {
236 if (type == RubyRequestType:LD) {
237 return Event:Load;
238 } else if (type == RubyRequestType:IFETCH) {
239 return Event:Ifetch;
240 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
241 return Event:Store;
242 } else {
243 error("Invalid RubyRequestType");
244 }
245 }
246
247 MessageBuffer triggerQueue, ordered="true";
248
249 // ** OUT_PORTS **
250
251 out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
252 out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
253 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
254
255 // ** IN_PORTS **
256
257 // Use Timer
258 in_port(useTimerTable_in, Address, useTimerTable) {
259 if (useTimerTable_in.isReady()) {
260 trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
261 getCacheEntry(useTimerTable.readyAddress()),
262 TBEs[useTimerTable.readyAddress()]);
263 }
264 }
265
266 // Trigger Queue
267 in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
268 if (triggerQueue_in.isReady()) {
269 peek(triggerQueue_in, TriggerMsg) {
270 if (in_msg.Type == TriggerType:ALL_ACKS) {
271 trigger(Event:All_acks, in_msg.Address,
272 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
273 } else {
274 error("Unexpected message");
275 }
276 }
277 }
278 }
279
280 // Nothing from the request network
281
282 // Request Network
283 in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
284 if (requestNetwork_in.isReady()) {
285 peek(requestNetwork_in, RequestMsg, block_on="Address") {
286 assert(in_msg.Destination.isElement(machineID));
287 DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
288
289 if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
290 if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
291 trigger(Event:Own_GETX, in_msg.Address,
292 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
293 } else {
294 trigger(Event:Fwd_GETX, in_msg.Address,
295 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
296 }
297 } else if (in_msg.Type == CoherenceRequestType:GETS) {
298 trigger(Event:Fwd_GETS, in_msg.Address,
299 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
300 } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
301 trigger(Event:Fwd_DMA, in_msg.Address,
302 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
303 } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
304 trigger(Event:Writeback_Ack, in_msg.Address,
305 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
306 } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
307 trigger(Event:Writeback_Ack_Data, in_msg.Address,
308 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
309 } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
310 trigger(Event:Writeback_Nack, in_msg.Address,
311 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
312 } else if (in_msg.Type == CoherenceRequestType:INV) {
313 trigger(Event:Inv, in_msg.Address,
314 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
315 } else {
316 error("Unexpected message");
317 }
318 }
319 }
320 }
321
322 // Response Network
323 in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
324 if (responseToL1Cache_in.isReady()) {
325 peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
326 if (in_msg.Type == CoherenceResponseType:ACK) {
327 trigger(Event:Ack, in_msg.Address,
328 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
329 } else if (in_msg.Type == CoherenceResponseType:DATA) {
330 trigger(Event:Data, in_msg.Address,
331 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
332 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
333 trigger(Event:Exclusive_Data, in_msg.Address,
334 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
335 } else {
336 error("Unexpected message");
337 }
338 }
339 }
340 }
341
342 // Nothing from the unblock network
343 // Mandatory queue between the node's CPU and its L1 caches
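// A request whose block currently sits in the other L1 (e.g. an ifetch to a
// block held in the D-cache) is first turned into an L1_Replacement of that
// copy; only once the block is out of the wrong cache, and a way is available
// in the right one, is the demand request itself triggered.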
344 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
345 if (mandatoryQueue_in.isReady()) {
346 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
347
348 // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
349
350 if (in_msg.Type == RubyRequestType:IFETCH) {
351 // ** INSTRUCTION ACCESS ***
352
353 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
354 if (is_valid(L1Icache_entry)) {
355 // The block is already present in the L1, so trigger the access against this entry.
356 trigger(mandatory_request_type_to_event(in_msg.Type),
357 in_msg.LineAddress, L1Icache_entry,
358 TBEs[in_msg.LineAddress]);
359 } else {
360
361 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
362 // Check to see if it is in the OTHER L1
363 if (is_valid(L1Dcache_entry)) {
364 // The block is in the other L1, so replace it out of that cache first
365 trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
366 TBEs[in_msg.LineAddress]);
367 }
368 if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
369 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
370 trigger(mandatory_request_type_to_event(in_msg.Type),
371 in_msg.LineAddress, L1Icache_entry,
372 TBEs[in_msg.LineAddress]);
373 } else {
374 // No room in the L1, so we need to make room in the L1
375 trigger(Event:L1_Replacement,
376 L1IcacheMemory.cacheProbe(in_msg.LineAddress),
377 getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
378 TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
379 }
380 }
381 } else {
382 // *** DATA ACCESS ***
383
384 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
385 if (is_valid(L1Dcache_entry)) {
386 // The block is already present in the L1, so trigger the access against this entry
387 trigger(mandatory_request_type_to_event(in_msg.Type),
388 in_msg.LineAddress, L1Dcache_entry,
389 TBEs[in_msg.LineAddress]);
390 } else {
391
392 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
393 // Check to see if it is in the OTHER L1
394 if (is_valid(L1Icache_entry)) {
395 // The block is in the other L1, so replace it out of that cache first
396 trigger(Event:L1_Replacement, in_msg.LineAddress,
397 L1Icache_entry, TBEs[in_msg.LineAddress]);
398 }
399 if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
400 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
401 trigger(mandatory_request_type_to_event(in_msg.Type),
402 in_msg.LineAddress, L1Dcache_entry,
403 TBEs[in_msg.LineAddress]);
404 } else {
405 // No room in the L1, so we need to make room in the L1
406 trigger(Event:L1_Replacement,
407 L1DcacheMemory.cacheProbe(in_msg.LineAddress),
408 getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
409 TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
410 }
411 }
412 }
413 }
414 }
415 }
416
417
418 // ACTIONS
419
420 action(a_issueGETS, "a", desc="Issue GETS") {
421 peek(mandatoryQueue_in, RubyRequest) {
422 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
423 out_msg.Address := address;
424 out_msg.Type := CoherenceRequestType:GETS;
425 out_msg.Requestor := machineID;
426 out_msg.RequestorMachine := MachineType:L1Cache;
427 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
428 l2_select_low_bit, l2_select_num_bits));
429 out_msg.MessageSize := MessageSizeType:Request_Control;
430 out_msg.AccessMode := in_msg.AccessMode;
431 out_msg.Prefetch := in_msg.Prefetch;
432 }
433 }
434 }
435
436 action(b_issueGETX, "b", desc="Issue GETX") {
437 peek(mandatoryQueue_in, RubyRequest) {
438 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
439 out_msg.Address := address;
440 out_msg.Type := CoherenceRequestType:GETX;
441 out_msg.Requestor := machineID;
442 out_msg.RequestorMachine := MachineType:L1Cache;
443 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
444 l2_select_low_bit, l2_select_num_bits));
445 out_msg.MessageSize := MessageSizeType:Request_Control;
446 out_msg.AccessMode := in_msg.AccessMode;
447 out_msg.Prefetch := in_msg.Prefetch;
448 }
449 }
450 }
451
452 action(d_issuePUTX, "d", desc="Issue PUTX") {
453 // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
454 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
455 out_msg.Address := address;
456 out_msg.Type := CoherenceRequestType:PUTX;
457 out_msg.Requestor := machineID;
458 out_msg.RequestorMachine := MachineType:L1Cache;
459 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
460 l2_select_low_bit, l2_select_num_bits));
461 out_msg.MessageSize := MessageSizeType:Writeback_Control;
462 }
463 }
464
465 action(dd_issuePUTO, "\d", desc="Issue PUTO") {
466 // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
467 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
468 out_msg.Address := address;
469 out_msg.Type := CoherenceRequestType:PUTO;
470 out_msg.Requestor := machineID;
471 out_msg.RequestorMachine := MachineType:L1Cache;
472 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
473 l2_select_low_bit, l2_select_num_bits));
474 out_msg.MessageSize := MessageSizeType:Writeback_Control;
475 }
476 }
477
478 action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
479 // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
480 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
481 out_msg.Address := address;
482 out_msg.Type := CoherenceRequestType:PUTS;
483 out_msg.Requestor := machineID;
484 out_msg.RequestorMachine := MachineType:L1Cache;
485 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
486 l2_select_low_bit, l2_select_num_bits));
487 out_msg.MessageSize := MessageSizeType:Writeback_Control;
488 }
489 }
490
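// For forwarded GETS and DMA reads the responder keeps its (possibly dirty)
// copy, so the data message below is marked clean (Dirty := false); the
// commented-out assignments show the alternative of forwarding the dirty bit
// instead.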
491 action(e_sendData, "e", desc="Send data from cache to requestor") {
492 peek(requestNetwork_in, RequestMsg) {
493 assert(is_valid(cache_entry));
494 if (in_msg.RequestorMachine == MachineType:L2Cache) {
495 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
496 out_msg.Address := address;
497 out_msg.Type := CoherenceResponseType:DATA;
498 out_msg.Sender := machineID;
499 out_msg.SenderMachine := MachineType:L1Cache;
500 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
501 l2_select_low_bit, l2_select_num_bits));
502 out_msg.DataBlk := cache_entry.DataBlk;
503 // out_msg.Dirty := cache_entry.Dirty;
504 out_msg.Dirty := false;
505 out_msg.Acks := in_msg.Acks;
506 out_msg.MessageSize := MessageSizeType:Response_Data;
507 }
508 DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address);
509 }
510 else {
511 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
512 out_msg.Address := address;
513 out_msg.Type := CoherenceResponseType:DATA;
514 out_msg.Sender := machineID;
515 out_msg.SenderMachine := MachineType:L1Cache;
516 out_msg.Destination.add(in_msg.Requestor);
517 out_msg.DataBlk := cache_entry.DataBlk;
518 // out_msg.Dirty := cache_entry.Dirty;
519 out_msg.Dirty := false;
520 out_msg.Acks := in_msg.Acks;
521 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
522 }
523 DPRINTF(RubySlicc, "Sending data to L1\n");
524 }
525 }
526 }
527
528 action(e_sendDataToL2, "ee", desc="Send data from cache to L2") {
529 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
530 assert(is_valid(cache_entry));
531 out_msg.Address := address;
532 out_msg.Type := CoherenceResponseType:DATA;
533 out_msg.Sender := machineID;
534 out_msg.SenderMachine := MachineType:L1Cache;
535 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
536 l2_select_low_bit, l2_select_num_bits));
537 out_msg.DataBlk := cache_entry.DataBlk;
538 out_msg.Dirty := cache_entry.Dirty;
539 out_msg.Acks := 0; // irrelevant
540 out_msg.MessageSize := MessageSizeType:Response_Data;
541 }
542 }
543
544 action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
545 peek(requestNetwork_in, RequestMsg) {
546 assert(is_valid(cache_entry));
547 if (in_msg.RequestorMachine == MachineType:L2Cache) {
548 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
549 out_msg.Address := address;
550 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
551 out_msg.Sender := machineID;
552 out_msg.SenderMachine := MachineType:L1Cache;
553 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
554 l2_select_low_bit, l2_select_num_bits));
555 out_msg.DataBlk := cache_entry.DataBlk;
556 out_msg.Dirty := cache_entry.Dirty;
557 out_msg.Acks := in_msg.Acks;
558 out_msg.MessageSize := MessageSizeType:Response_Data;
559 }
560 DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
561 }
562 else {
563 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
564 out_msg.Address := address;
565 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
566 out_msg.Sender := machineID;
567 out_msg.SenderMachine := MachineType:L1Cache;
568 out_msg.Destination.add(in_msg.Requestor);
569 out_msg.DataBlk := cache_entry.DataBlk;
570 out_msg.Dirty := cache_entry.Dirty;
571 out_msg.Acks := in_msg.Acks;
572 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
573 }
574 DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
575 }
576 }
577 }
578
579 action(f_sendAck, "f", desc="Send ack from cache to requestor") {
580 peek(requestNetwork_in, RequestMsg) {
581 if (in_msg.RequestorMachine == MachineType:L1Cache) {
582 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
583 out_msg.Address := address;
584 out_msg.Type := CoherenceResponseType:ACK;
585 out_msg.Sender := machineID;
586 out_msg.SenderMachine := MachineType:L1Cache;
587 out_msg.Destination.add(in_msg.Requestor);
588 out_msg.Acks := 0 - 1; // -1
589 out_msg.MessageSize := MessageSizeType:Response_Control;
590 }
591 }
592 else {
593 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
594 out_msg.Address := address;
595 out_msg.Type := CoherenceResponseType:ACK;
596 out_msg.Sender := machineID;
597 out_msg.SenderMachine := MachineType:L1Cache;
598 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
599 l2_select_low_bit, l2_select_num_bits));
600 out_msg.Acks := 0 - 1; // -1
601 out_msg.MessageSize := MessageSizeType:Response_Control;
602 }
603 }
604 }
605 }
606
607 action(g_sendUnblock, "g", desc="Send unblock to memory") {
608 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
609 out_msg.Address := address;
610 out_msg.Type := CoherenceResponseType:UNBLOCK;
611 out_msg.Sender := machineID;
612 out_msg.SenderMachine := MachineType:L1Cache;
613 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
614 l2_select_low_bit, l2_select_num_bits));
615 out_msg.MessageSize := MessageSizeType:Unblock_Control;
616 }
617 }
618
619 action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
620 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
621 out_msg.Address := address;
622 out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
623 out_msg.Sender := machineID;
624 out_msg.SenderMachine := MachineType:L1Cache;
625 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
626 l2_select_low_bit, l2_select_num_bits));
627 out_msg.MessageSize := MessageSizeType:Unblock_Control;
628 }
629 }
630
631 action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
632 assert(is_valid(cache_entry));
633 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
634 sequencer.readCallback(address, cache_entry.DataBlk);
635 }
636
637 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
638 assert(is_valid(cache_entry));
639 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
640 sequencer.writeCallback(address, cache_entry.DataBlk);
641 cache_entry.Dirty := true;
642 }
643
644 action(i_allocateTBE, "i", desc="Allocate TBE") {
645 check_allocate(TBEs);
646 TBEs.allocate(address);
647 set_tbe(TBEs[address]);
648 assert(is_valid(cache_entry));
649 tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
650 tbe.Dirty := cache_entry.Dirty;
651 }
652
653 action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
654 triggerQueue_in.dequeue();
655 }
656
657 action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
658 useTimerTable.unset(address);
659 }
660
661 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
662 mandatoryQueue_in.dequeue();
663 }
664
665 action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
666 requestNetwork_in.dequeue();
667 }
668
669 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
670 peek(responseToL1Cache_in, ResponseMsg) {
671 assert(is_valid(tbe));
672 DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
673 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
674 }
675 }
676
677 action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
678 peek(requestNetwork_in, RequestMsg) {
679 assert(is_valid(tbe));
680 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
681 }
682 }
683
684 action(n_popResponseQueue, "n", desc="Pop response queue") {
685 responseToL1Cache_in.dequeue();
686 }
687
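// Ack counting works as a signed sum: each invalidation ack arrives with
// Acks = -1 (subtracting it raises NumPendingMsgs by one), while the message
// announcing how many acks are outstanding (the forwarded Own_GETX, or a data
// response relaying the request's Acks field) carries a positive count. The
// sum returns to zero once everything has arrived, in any order, and the check
// below then posts the All_acks trigger.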
688 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
689 assert(is_valid(tbe));
690 if (tbe.NumPendingMsgs == 0) {
691 enqueue(triggerQueue_out, TriggerMsg) {
692 out_msg.Address := address;
693 out_msg.Type := TriggerType:ALL_ACKS;
694 }
695 }
696 }
697
698 action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
699 useTimerTable.set(address, 50);
700 }
701
702 action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
703 peek(requestNetwork_in, RequestMsg) {
704 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
705 out_msg.Address := address;
706 out_msg.Type := CoherenceResponseType:DMA_ACK;
707 out_msg.Sender := machineID;
708 out_msg.SenderMachine := MachineType:L1Cache;
709 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
710 l2_select_low_bit, l2_select_num_bits));
711 out_msg.Dirty := false;
712 out_msg.Acks := 1;
713 out_msg.MessageSize := MessageSizeType:Response_Control;
714 }
715 }
716 }
717
718 action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
719 peek(requestNetwork_in, RequestMsg) {
720 assert(is_valid(tbe));
721 if (in_msg.RequestorMachine == MachineType:L1Cache ||
722 in_msg.RequestorMachine == MachineType:DMA) {
723 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
724 out_msg.Address := address;
725 out_msg.Type := CoherenceResponseType:DATA;
726 out_msg.Sender := machineID;
727 out_msg.SenderMachine := MachineType:L1Cache;
728 out_msg.Destination.add(in_msg.Requestor);
729 out_msg.DataBlk := tbe.DataBlk;
730 // out_msg.Dirty := tbe.Dirty;
731 out_msg.Dirty := false;
732 out_msg.Acks := in_msg.Acks;
733 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
734 }
735 }
736 else {
737 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
738 out_msg.Address := address;
739 out_msg.Type := CoherenceResponseType:DATA;
740 out_msg.Sender := machineID;
741 out_msg.SenderMachine := MachineType:L1Cache;
742 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
743 l2_select_low_bit, l2_select_num_bits));
744 out_msg.DataBlk := tbe.DataBlk;
745 // out_msg.Dirty := tbe.Dirty;
746 out_msg.Dirty := false;
747 out_msg.Acks := in_msg.Acks;
748 out_msg.MessageSize := MessageSizeType:Response_Data;
749 }
750 }
751 }
752 }
753
754 action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
755 peek(requestNetwork_in, RequestMsg) {
756 assert(is_valid(tbe));
757 if (in_msg.RequestorMachine == MachineType:L1Cache) {
758 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
759 out_msg.Address := address;
760 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
761 out_msg.Sender := machineID;
762 out_msg.SenderMachine := MachineType:L1Cache;
763 out_msg.Destination.add(in_msg.Requestor);
764 out_msg.DataBlk := tbe.DataBlk;
765 out_msg.Dirty := tbe.Dirty;
766 out_msg.Acks := in_msg.Acks;
767 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
768 }
769 }
770 else {
771 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
772 out_msg.Address := address;
773 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
774 out_msg.Sender := machineID;
775 out_msg.SenderMachine := MachineType:L1Cache;
776 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
777 l2_select_low_bit, l2_select_num_bits));
778 out_msg.DataBlk := tbe.DataBlk;
779 out_msg.Dirty := tbe.Dirty;
780 out_msg.Acks := in_msg.Acks;
781 out_msg.MessageSize := MessageSizeType:Response_Data;
782 }
783 }
784 }
785 }
786
787 // L2 will usually request data for a writeback
788 action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
789 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
790 assert(is_valid(tbe));
791 out_msg.Address := address;
792 out_msg.Sender := machineID;
793 out_msg.SenderMachine := MachineType:L1Cache;
794 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
795 l2_select_low_bit, l2_select_num_bits));
796 out_msg.Dirty := tbe.Dirty;
797 if (tbe.Dirty) {
798 out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
799 } else {
800 out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
801 }
802 out_msg.DataBlk := tbe.DataBlk;
803 out_msg.MessageSize := MessageSizeType:Writeback_Data;
804 }
805 }
806
807 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
808 TBEs.deallocate(address);
809 unset_tbe();
810 }
811
812 action(u_writeDataToCache, "u", desc="Write data to cache") {
813 peek(responseToL1Cache_in, ResponseMsg) {
814 assert(is_valid(cache_entry));
815 cache_entry.DataBlk := in_msg.DataBlk;
816 cache_entry.Dirty := in_msg.Dirty;
817
818 if (in_msg.Type == CoherenceResponseType:DATA) {
819 //assert(in_msg.Dirty == false);
820 }
821 }
822 }
823
824 action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
825 peek(responseToL1Cache_in, ResponseMsg) {
826 assert(is_valid(cache_entry));
827 assert(cache_entry.DataBlk == in_msg.DataBlk);
828 cache_entry.DataBlk := in_msg.DataBlk;
829 cache_entry.Dirty := in_msg.Dirty;
830 }
831 }
832
833 action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
834 if (L1DcacheMemory.isTagPresent(address)) {
835 L1DcacheMemory.deallocate(address);
836 } else {
837 L1IcacheMemory.deallocate(address);
838 }
839 unset_cache_entry();
840 }
841
842 action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
843 if ((is_invalid(cache_entry))) {
844 set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
845 }
846 }
847
848 action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
849 if ((is_invalid(cache_entry))) {
850 set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
851 }
852 }
853
854 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
855 if (send_evictions) {
856 DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
857 sequencer.evictionCallback(address);
858 }
859 }
860
861 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
862 peek(mandatoryQueue_in, RubyRequest) {
863 // profile_miss(in_msg);
864 }
865 }
866
867 action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
868 requestNetwork_in.recycle();
869 }
870
871 action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
872 mandatoryQueue_in.recycle();
873 }
874
875 //*****************************************************
876 // TRANSITIONS
877 //*****************************************************
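// Requests that cannot be handled in the current (mostly transient) state are
// not dropped; the mandatory or forwarded-request queue is recycled so the
// message is retried once the state has changed.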
878
879 // Transitions for Load/Store/L1_Replacement from transient states
880 transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
881 zz_recycleMandatoryQueue;
882 }
883
884 transition({M_W, MM_W}, L1_Replacement) {
885 zz_recycleMandatoryQueue;
886 }
887
888 transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
889 z_recycleRequestQueue;
890 }
891
892 transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
893 zz_recycleMandatoryQueue;
894 }
895
896 // Transitions from Idle
897 transition(I, Load, IS) {
898 ii_allocateL1DCacheBlock;
899 i_allocateTBE;
900 a_issueGETS;
901 // uu_profileMiss;
902 k_popMandatoryQueue;
903 }
904
905 transition(I, Ifetch, IS) {
906 jj_allocateL1ICacheBlock;
907 i_allocateTBE;
908 a_issueGETS;
909 // uu_profileMiss;
910 k_popMandatoryQueue;
911 }
912
913 transition(I, Store, IM) {
914 ii_allocateL1DCacheBlock;
915 i_allocateTBE;
916 b_issueGETX;
917 // uu_profileMiss;
918 k_popMandatoryQueue;
919 }
920
921 transition(I, L1_Replacement) {
922 kk_deallocateL1CacheBlock;
923 }
924
925 transition(I, Inv) {
926 f_sendAck;
927 l_popForwardQueue;
928 }
929
930 // Transitions from Shared
931 transition({S, SM}, {Load, Ifetch}) {
932 h_load_hit;
933 k_popMandatoryQueue;
934 }
935
936 transition(S, Store, SM) {
937 i_allocateTBE;
938 b_issueGETX;
939 // uu_profileMiss;
940 k_popMandatoryQueue;
941 }
942
943 transition(S, L1_Replacement, SI) {
944 i_allocateTBE;
945 dd_issuePUTS;
946 forward_eviction_to_cpu;
947 kk_deallocateL1CacheBlock;
948 }
949
950 transition(S, Inv, I) {
951 f_sendAck;
952 forward_eviction_to_cpu;
953 l_popForwardQueue;
954 }
955
956 transition(S, Fwd_GETS) {
957 e_sendData;
958 l_popForwardQueue;
959 }
960
961 transition(S, Fwd_DMA) {
962 e_sendData;
963 ub_dmaUnblockL2Cache;
964 l_popForwardQueue;
965 }
966
967 // Transitions from Owned
968 transition({O, OM}, {Load, Ifetch}) {
969 h_load_hit;
970 k_popMandatoryQueue;
971 }
972
973 transition(O, Store, OM) {
974 i_allocateTBE;
975 b_issueGETX;
976 // uu_profileMiss;
977 k_popMandatoryQueue;
978 }
979
980 transition(O, L1_Replacement, OI) {
981 i_allocateTBE;
982 dd_issuePUTO;
983 forward_eviction_to_cpu;
984 kk_deallocateL1CacheBlock;
985 }
986
987 transition(O, Fwd_GETX, I) {
988 ee_sendDataExclusive;
989 forward_eviction_to_cpu;
990 l_popForwardQueue;
991 }
992
993 transition(O, Fwd_GETS) {
994 e_sendData;
995 l_popForwardQueue;
996 }
997
998 transition(O, Fwd_DMA) {
999 e_sendData;
1000 ub_dmaUnblockL2Cache;
1001 l_popForwardQueue;
1002 }
1003
1004 // Transitions from MM
1005 transition({MM, MM_W}, {Load, Ifetch}) {
1006 h_load_hit;
1007 k_popMandatoryQueue;
1008 }
1009
1010 transition({MM, MM_W}, Store) {
1011 hh_store_hit;
1012 k_popMandatoryQueue;
1013 }
1014
1015 transition(MM, L1_Replacement, MI) {
1016 i_allocateTBE;
1017 d_issuePUTX;
1018 forward_eviction_to_cpu;
1019 kk_deallocateL1CacheBlock;
1020 }
1021
1022 transition(MM, Fwd_GETX, I) {
1023 ee_sendDataExclusive;
1024 forward_eviction_to_cpu;
1025 l_popForwardQueue;
1026 }
1027
1028 transition(MM, Fwd_GETS, I) {
1029 ee_sendDataExclusive;
1030 forward_eviction_to_cpu;
1031 l_popForwardQueue;
1032 }
1033
1034 transition(MM, Fwd_DMA, MM) {
1035 e_sendData;
1036 ub_dmaUnblockL2Cache;
1037 l_popForwardQueue;
1038 }
1039
1040 // Transitions from M
1041 transition({M, M_W}, {Load, Ifetch}) {
1042 h_load_hit;
1043 k_popMandatoryQueue;
1044 }
1045
1046 transition(M, Store, MM) {
1047 hh_store_hit;
1048 k_popMandatoryQueue;
1049 }
1050
1051 transition(M_W, Store, MM_W) {
1052 hh_store_hit;
1053 k_popMandatoryQueue;
1054 }
1055
1056 transition(M, L1_Replacement, MI) {
1057 i_allocateTBE;
1058 d_issuePUTX;
1059 forward_eviction_to_cpu;
1060 kk_deallocateL1CacheBlock;
1061 }
1062
1063 transition(M, Fwd_GETX, I) {
1064 // e_sendData;
1065 ee_sendDataExclusive;
1066 forward_eviction_to_cpu;
1067 l_popForwardQueue;
1068 }
1069
1070 transition(M, Fwd_GETS, O) {
1071 e_sendData;
1072 l_popForwardQueue;
1073 }
1074
1075 transition(M, Fwd_DMA) {
1076 e_sendData;
1077 ub_dmaUnblockL2Cache;
1078 l_popForwardQueue;
1079 }
1080
1081 // Transitions from IM
1082
1083 transition(IM, Inv) {
1084 f_sendAck;
1085 l_popForwardQueue;
1086 }
1087
1088 transition(IM, Ack) {
1089 m_decrementNumberOfMessages;
1090 o_checkForCompletion;
1091 n_popResponseQueue;
1092 }
1093
1094 transition(IM, {Exclusive_Data, Data}, OM) {
1095 u_writeDataToCache;
1096 m_decrementNumberOfMessages;
1097 o_checkForCompletion;
1098 n_popResponseQueue;
1099 }
1100
1101 // Transitions from SM
1102 transition(SM, Inv, IM) {
1103 f_sendAck;
1104 forward_eviction_to_cpu;
1105 l_popForwardQueue;
1106 }
1107
1108 transition(SM, Ack) {
1109 m_decrementNumberOfMessages;
1110 o_checkForCompletion;
1111 n_popResponseQueue;
1112 }
1113
1114 transition(SM, {Data, Exclusive_Data}, OM) {
1115 // v_writeDataToCacheVerify;
1116 m_decrementNumberOfMessages;
1117 o_checkForCompletion;
1118 n_popResponseQueue;
1119 }
1120
1121 transition(SM, Fwd_GETS) {
1122 e_sendData;
1123 l_popForwardQueue;
1124 }
1125
1126 transition(SM, Fwd_DMA) {
1127 e_sendData;
1128 ub_dmaUnblockL2Cache;
1129 l_popForwardQueue;
1130 }
1131
1132 // Transitions from OM
1133 transition(OM, Own_GETX) {
1134 mm_decrementNumberOfMessages;
1135 o_checkForCompletion;
1136 l_popForwardQueue;
1137 }
1138
1139
1140 // transition(OM, Fwd_GETX, OMF) {
1141 transition(OM, Fwd_GETX, IM) {
1142 ee_sendDataExclusive;
1143 l_popForwardQueue;
1144 }
1145
1146 transition(OM, Fwd_GETS) {
1147 e_sendData;
1148 l_popForwardQueue;
1149 }
1150
1151 transition(OM, Fwd_DMA) {
1152 e_sendData;
1153 ub_dmaUnblockL2Cache;
1154 l_popForwardQueue;
1155 }
1156
1157 //transition({OM, OMF}, Ack) {
1158 transition(OM, Ack) {
1159 m_decrementNumberOfMessages;
1160 o_checkForCompletion;
1161 n_popResponseQueue;
1162 }
1163
1164 transition(OM, All_acks, MM_W) {
1165 hh_store_hit;
1166 gg_sendUnblockExclusive;
1167 s_deallocateTBE;
1168 o_scheduleUseTimeout;
1169 j_popTriggerQueue;
1170 }
1171
1172 transition(MM_W, Use_Timeout, MM) {
1173 jj_unsetUseTimer;
1174 }
1175
1176 // Transitions from IS
1177
1178 transition(IS, Inv) {
1179 f_sendAck;
1180 l_popForwardQueue;
1181 }
1182
1183 transition(IS, Data, S) {
1184 u_writeDataToCache;
1185 m_decrementNumberOfMessages;
1186 h_load_hit;
1187 g_sendUnblock;
1188 s_deallocateTBE;
1189 n_popResponseQueue;
1190 }
1191
1192 transition(IS, Exclusive_Data, M_W) {
1193 u_writeDataToCache;
1194 m_decrementNumberOfMessages;
1195 h_load_hit;
1196 gg_sendUnblockExclusive;
1197 o_scheduleUseTimeout;
1198 s_deallocateTBE;
1199 n_popResponseQueue;
1200 }
1201
1202 transition(M_W, Use_Timeout, M) {
1203 jj_unsetUseTimer;
1204 }
1205
1206 // Transitions from OI/MI
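// Writeback handshake: after issuing PUTX/PUTO/PUTS the block sits in MI/OI/SI
// with its data held in the TBE. The L2 answers with Writeback_Ack (just send
// an unblock), Writeback_Ack_Data (send the TBE data via
// qq_sendWBDataFromTBEToL2), or Writeback_Nack (reissue the Put, or simply drop
// the TBE if the line has since been invalidated). Requests that race with the
// writeback are serviced out of the TBE by the q_send* actions.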
1207
1208 transition(MI, Fwd_GETS, OI) {
1209 q_sendDataFromTBEToCache;
1210 l_popForwardQueue;
1211 }
1212
1213 transition(MI, Fwd_DMA) {
1214 q_sendDataFromTBEToCache;
1215 ub_dmaUnblockL2Cache;
1216 l_popForwardQueue;
1217 }
1218
1219 transition(MI, Fwd_GETX, II) {
1220 q_sendExclusiveDataFromTBEToCache;
1221 l_popForwardQueue;
1222 }
1223
1224 transition({SI, OI}, Fwd_GETS) {
1225 q_sendDataFromTBEToCache;
1226 l_popForwardQueue;
1227 }
1228
1229 transition({SI, OI}, Fwd_DMA) {
1230 q_sendDataFromTBEToCache;
1231 ub_dmaUnblockL2Cache;
1232 l_popForwardQueue;
1233 }
1234
1235 transition(OI, Fwd_GETX, II) {
1236 q_sendExclusiveDataFromTBEToCache;
1237 l_popForwardQueue;
1238 }
1239
1240 transition({SI, OI, MI}, Writeback_Ack_Data, I) {
1241 qq_sendWBDataFromTBEToL2; // always send data
1242 s_deallocateTBE;
1243 l_popForwardQueue;
1244 }
1245
1246 transition({SI, OI, MI}, Writeback_Ack, I) {
1247 g_sendUnblock;
1248 s_deallocateTBE;
1249 l_popForwardQueue;
1250 }
1251
1252 transition({MI, OI}, Writeback_Nack, OI) {
1253 // FIXME: This might cause deadlock by re-using the writeback
1254 // channel, we should handle this case differently.
1255 dd_issuePUTO;
1256 l_popForwardQueue;
1257 }
1258
1259 // Transitions from II
1260 transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
1261 g_sendUnblock;
1262 s_deallocateTBE;
1263 l_popForwardQueue;
1264 }
1265
1266 // transition({II, SI}, Writeback_Nack, I) {
1267 transition(II, Writeback_Nack, I) {
1268 s_deallocateTBE;
1269 l_popForwardQueue;
1270 }
1271
1272 transition(SI, Writeback_Nack) {
1273 dd_issuePUTS;
1274 l_popForwardQueue;
1275 }
1276
1277 transition(II, Inv) {
1278 f_sendAck;
1279 l_popForwardQueue;
1280 }
1281
1282 transition(SI, Inv, II) {
1283 f_sendAck;
1284 l_popForwardQueue;
1285 }
1286 }