src/mem/protocol/MOESI_CMP_directory-L1cache.sm

/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * $Id$
 *
 */

machine(L1Cache, "Directory protocol")
 : Sequencer * sequencer,
   CacheMemory * L1IcacheMemory,
   CacheMemory * L1DcacheMemory,
   int l2_select_num_bits,
   Cycles request_latency = 2,
   Cycles use_timeout_latency = 50,
   bool send_evictions
{

  // NODE L1 CACHE
  // From this node's L1 cache TO the network
  // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
  MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
  // a local L1 -> this L2 bank
  MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
  // MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";


  // To this node's L1 cache FROM the network
  // an L2 bank -> this L1
  MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
  // an L2 bank -> this L1
  MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";



  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared";
    O, AccessPermission:Read_Only, desc="Owned";
    M, AccessPermission:Read_Only, desc="Modified (dirty)";
    M_W, AccessPermission:Read_Only, desc="Modified (dirty)";
    MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
    MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";

    // Transient States
    IM, AccessPermission:Busy, "IM", desc="Issued GetX";
    SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
    IS, AccessPermission:Busy, "IS", desc="Issued GetS";
    SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
    OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
    MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
    II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  }
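
  // Transient state names follow a <current><target> convention: IM is a line
  // that was Invalid and is being upgraded to Modified, SI is a Shared line
  // being written back toward Invalid, and II marks a block that is leaving
  // the cache while a forwarded request for it is still being resolved.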

  // EVENTS
  enumeration(Event, desc="Cache events") {
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    L1_Replacement, desc="Replacement";

    // Requests
    Own_GETX, desc="We observe our own GetX forwarded back to us";
    Fwd_GETX, desc="A GetX from another processor";
    Fwd_GETS, desc="A GetS from another processor";
    Fwd_DMA, desc="A DMA read request forwarded from the directory";
    Inv, desc="Invalidations from the directory";

    // Responses
    Ack, desc="Received an ack message";
    Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data, desc="Received an exclusive data message, no other cache holds a copy";

    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Ack_Data, desc="Writeback O.K. from directory, data must be sent";
    Writeback_Nack, desc="Writeback not O.K. from directory";

    // Triggers
    All_acks, desc="Received all required data and message acks";

    // Timeouts
    Use_Timeout, desc="lockout period ended";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Address Address, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();

  MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
  TimerTable useTimerTable;
  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
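  // The target L2 bank is chosen by mapAddressToRange() using
  // l2_select_num_bits address bits starting at l2_select_low_bit, which by
  // default are the bits just above the cache-block offset.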

  Entry getCacheEntry(Address addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
  }

  Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);

    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
           ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
           ((cache_entry.CacheState != State:S) && (state == State:S)) ||
           ((cache_entry.CacheState != State:O) && (state == State:O)) ) {

        cache_entry.CacheState := state;
        sequencer.checkCoherence(addr);
      }
      else {
        cache_entry.CacheState := state;
      }
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return cache_entry.DataBlk;
    }

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    }

    error("Data block missing!");
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

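  // The trigger queue feeds internally generated events back into this
  // controller; currently it carries only ALL_ACKS, enqueued by
  // o_checkForCompletion and delivered as the All_acks event.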
  MessageBuffer triggerQueue, ordered="true";

  // ** OUT_PORTS **

  out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  // ** IN_PORTS **

  // Use Timer
  in_port(useTimerTable_in, Address, useTimerTable) {
    if (useTimerTable_in.isReady()) {
      trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
              getCacheEntry(useTimerTable.readyAddress()),
              TBEs[useTimerTable.readyAddress()]);
    }
  }

  // Trigger Queue
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the request network

  // Request Network
  in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg, block_on="Address") {
        assert(in_msg.Destination.isElement(machineID));
        DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);

        if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
            trigger(Event:Own_GETX, in_msg.Address,
                    getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
          } else {
            trigger(Event:Fwd_GETX, in_msg.Address,
                    getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
          }
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:Fwd_DMA, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
          trigger(Event:Writeback_Ack_Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Response Network
  in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
    if (responseToL1Cache_in.isReady()) {
      peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Exclusive_Data, in_msg.Address,
                  getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Nothing from the unblock network
  // Mandatory queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data access to blocks in I-cache and ifetches to blocks in D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it.
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Icache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1, put the request on the queue to the shared L2
              trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            }
            if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Icache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement,
                      L1IcacheMemory.cacheProbe(in_msg.LineAddress),
                      getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {
          // *** DATA ACCESS ***

          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches for the L1, so the L1 asks the L2 for it
            trigger(mandatory_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress, L1Dcache_entry,
                    TBEs[in_msg.LineAddress]);
          } else {

            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            // Check to see if it is in the OTHER L1
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1, put the request on the queue to the shared L2
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }
            if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
              // L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
              trigger(mandatory_request_type_to_event(in_msg.Type),
                      in_msg.LineAddress, L1Dcache_entry,
                      TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement,
                      L1DcacheMemory.cacheProbe(in_msg.LineAddress),
                      getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }


  // ACTIONS

  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        out_msg.RequestorMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      }
    }
  }

  action(d_issuePUTX, "d", desc="Issue PUTX") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
    // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
    enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTS;
      out_msg.Requestor := machineID;
      out_msg.RequestorMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address);
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          // out_msg.Dirty := cache_entry.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending data to L1\n");
      }
    }
  }

  action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      assert(is_valid(cache_entry));
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Acks := 0; // irrelevant
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (in_msg.RequestorMachine == MachineType:L2Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := cache_entry.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
        DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
      }
    }
  }

  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.Acks := 0 - 1; // -1
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
  }

  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
    }
  }

  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    assert(is_valid(cache_entry));
    tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
    tbe.Dirty := cache_entry.Dirty;
  }

  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  }

  action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
    useTimerTable.unset(address);
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
  }

  action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
    requestNetwork_in.dequeue();
  }

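  // Ack counting (a sketch of the usual GEMS convention, relied on but not
  // defined in this file): invalidation acks carry Acks = -1 (see f_sendAck)
  // while the data response is expected to carry the positive count of
  // sharers being invalidated, so subtracting in_msg.Acks below drives
  // NumPendingMsgs back to zero exactly when every expected message has
  // arrived, regardless of arrival order (checked by o_checkForCompletion).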
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(tbe));
      DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
    }
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToL1Cache_in.dequeue();
  }

  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    assert(is_valid(tbe));
    if (tbe.NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
      }
    }
  }

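  // After filling with exclusive data the block sits in M_W/MM_W for
  // use_timeout_latency cycles; forwarded requests are recycled during that
  // window (see the M_W/MM_W transitions), giving the local processor a
  // chance to use the block before it can be taken away.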
  action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
    useTimerTable.set(address, use_timeout_latency);
  }

  action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DMA_ACK;
        out_msg.Sender := machineID;
        out_msg.SenderMachine := MachineType:L1Cache;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits));
        out_msg.Dirty := false;
        out_msg.Acks := 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache ||
          in_msg.RequestorMachine == MachineType:DMA) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := tbe.DataBlk;
          // out_msg.Dirty := tbe.Dirty;
          out_msg.Dirty := false;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(tbe));
      if (in_msg.RequestorMachine == MachineType:L1Cache) {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
        }
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
          out_msg.Sender := machineID;
          out_msg.SenderMachine := MachineType:L1Cache;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                  l2_select_low_bit, l2_select_num_bits));
          out_msg.DataBlk := tbe.DataBlk;
          out_msg.Dirty := tbe.Dirty;
          out_msg.Acks := in_msg.Acks;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
      }
    }
  }

  // L2 will usually request data for a writeback
  action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
    enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
      assert(is_valid(tbe));
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.SenderMachine := MachineType:L1Cache;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits));
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Dirty) {
        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
      } else {
        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
      }
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;

      if (in_msg.Type == CoherenceResponseType:DATA) {
        //assert(in_msg.Dirty == false);
      }
    }
  }

  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToL1Cache_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.DataBlk == in_msg.DataBlk);
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
    } else {
      L1IcacheMemory.deallocate(address);
    }
    unset_cache_entry();
  }

  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if ((is_invalid(cache_entry))) {
      set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
    }
  }

  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if ((is_invalid(cache_entry))) {
      set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      // profile_miss(in_msg);
    }
  }

  action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
    requestNetwork_in.recycle();
  }

  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

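  // Each transition has the form transition(current_state(s), event(s)[, next_state])
  // { actions }; when no next state is given the block remains in its current state.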
  // Transitions for Load/Store/L1_Replacement from transient states
  transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, L1_Replacement) {
    zz_recycleMandatoryQueue;
  }

  transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
    z_recycleRequestQueue;
  }

  transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, IS) {
    jj_allocateL1ICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    ii_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(I, L1_Replacement) {
    kk_deallocateL1CacheBlock;
  }

  transition(I, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  // Transitions from Shared
  transition({S, SM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, SI) {
    i_allocateTBE;
    dd_issuePUTS;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(S, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(S, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from Owned
  transition({O, OM}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(O, Store, OM) {
    i_allocateTBE;
    b_issueGETX;
    // uu_profileMiss;
    k_popMandatoryQueue;
  }

  transition(O, L1_Replacement, OI) {
    i_allocateTBE;
    dd_issuePUTO;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(O, Fwd_GETX, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(O, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(O, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from MM
  transition({MM, MM_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition({MM, MM_W}, Store) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(MM, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(MM, Fwd_GETX, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(MM, Fwd_DMA, MM) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from M
  transition({M, M_W}, {Load, Ifetch}) {
    h_load_hit;
    k_popMandatoryQueue;
  }

  transition(M, Store, MM) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M_W, Store, MM_W) {
    hh_store_hit;
    k_popMandatoryQueue;
  }

  transition(M, L1_Replacement, MI) {
    i_allocateTBE;
    d_issuePUTX;
    forward_eviction_to_cpu;
    kk_deallocateL1CacheBlock;
  }

  transition(M, Fwd_GETX, I) {
    // e_sendData;
    ee_sendDataExclusive;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(M, Fwd_GETS, O) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(M, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from IM

  transition(IM, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(IM, {Exclusive_Data, Data}, OM) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }
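  // Data can arrive while invalidation acks are still outstanding, so IM moves
  // to OM here and waits for All_acks before the store is performed.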

  // Transitions from SM
  transition(SM, Inv, IM) {
    f_sendAck;
    forward_eviction_to_cpu;
    l_popForwardQueue;
  }

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, {Data, Exclusive_Data}, OM) {
    // v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(SM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(SM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  // Transitions from OM
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;
    l_popForwardQueue;
  }


  // transition(OM, Fwd_GETX, OMF) {
  transition(OM, Fwd_GETX, IM) {
    ee_sendDataExclusive;
    l_popForwardQueue;
  }

  transition(OM, Fwd_GETS) {
    e_sendData;
    l_popForwardQueue;
  }

  transition(OM, Fwd_DMA) {
    e_sendData;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  //transition({OM, OMF}, Ack) {
  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
    n_popResponseQueue;
  }

  transition(OM, All_acks, MM_W) {
    hh_store_hit;
    gg_sendUnblockExclusive;
    s_deallocateTBE;
    o_scheduleUseTimeout;
    j_popTriggerQueue;
  }

  transition(MM_W, Use_Timeout, MM) {
    jj_unsetUseTimer;
  }

  // Transitions from IS

  transition(IS, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    g_sendUnblock;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IS, Exclusive_Data, M_W) {
    u_writeDataToCache;
    m_decrementNumberOfMessages;
    h_load_hit;
    gg_sendUnblockExclusive;
    o_scheduleUseTimeout;
    s_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M_W, Use_Timeout, M) {
    jj_unsetUseTimer;
  }

  // Transitions from OI/MI

  transition(MI, Fwd_GETS, OI) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(MI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_GETS) {
    q_sendDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI}, Fwd_DMA) {
    q_sendDataFromTBEToCache;
    ub_dmaUnblockL2Cache;
    l_popForwardQueue;
  }

  transition(OI, Fwd_GETX, II) {
    q_sendExclusiveDataFromTBEToCache;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack_Data, I) {
    qq_sendWBDataFromTBEToL2; // always send data
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({SI, OI, MI}, Writeback_Ack, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition({MI, OI}, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel, we should handle this case differently.
    dd_issuePUTO;
    l_popForwardQueue;
  }

  // Transitions from II
  transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
    g_sendUnblock;
    s_deallocateTBE;
    l_popForwardQueue;
  }

  // transition({II, SI}, Writeback_Nack, I) {
  transition(II, Writeback_Nack, I) {
    s_deallocateTBE;
    l_popForwardQueue;
  }

  transition(SI, Writeback_Nack) {
    dd_issuePUTS;
    l_popForwardQueue;
  }

  transition(II, Inv) {
    f_sendAck;
    l_popForwardQueue;
  }

  transition(SI, Inv, II) {
    f_sendAck;
    l_popForwardQueue;
  }
}