Cache: Remove dangling doWriteback declaration
[gem5.git] / src / mem / protocol / MOESI_CMP_directory-L1cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * $Id$
32 *
33 */
34
35 machine(L1Cache, "Directory protocol")
36 : Sequencer * sequencer,
37 CacheMemory * L1IcacheMemory,
38 CacheMemory * L1DcacheMemory,
39 int l2_select_num_bits,
40 int request_latency = 2,
41 bool send_evictions
42 {
43
44 // NODE L1 CACHE
45 // From this node's L1 cache TO the network
46 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
47 MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
48 // a local L1 -> this L2 bank
49 MessageBuffer responseFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="response";
50 // MessageBuffer writebackFromL1Cache, network="To", virtual_network="3", ordered="false", vnet_type="writeback";
51
52
53 // To this node's L1 cache FROM the network
54 // a L2 bank -> this L1
55 MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
56 // a L2 bank -> this L1
57 MessageBuffer responseToL1Cache, network="From", virtual_network="2", ordered="false", vnet_type="response";
58
59
60
61 // STATES
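// Base states are the usual MOESI set (with MM marking data that is both dirty
// and locally modified); the *_W variants are use-timer lockout states, and the
// two-letter transient states track in-flight GETS/GETX requests and writeback
// handshakes.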
62 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
63 // Base states
64 I, AccessPermission:Invalid, desc="Idle";
65 S, AccessPermission:Read_Only, desc="Shared";
66 O, AccessPermission:Read_Only, desc="Owned";
67 M, AccessPermission:Read_Only, desc="Modified (dirty)";
68 M_W, AccessPermission:Read_Only, desc="Modified (dirty), within the use-timeout window";
69 MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
70 MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified), within the use-timeout window";
71
72 // Transient States
73 IM, AccessPermission:Busy, "IM", desc="Issued GetX";
74 SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
75 OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
76 IS, AccessPermission:Busy, "IS", desc="Issued GetS";
77 SI, AccessPermission:Busy, "SI", desc="Issued PutS, waiting for ack";
78 OI, AccessPermission:Busy, "OI", desc="Issued PutO, waiting for ack";
79 MI, AccessPermission:Busy, "MI", desc="Issued PutX, waiting for ack";
80 II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
81 }
82
83 // EVENTS
84 enumeration(Event, desc="Cache events") {
85 Load, desc="Load request from the processor";
86 Ifetch, desc="I-fetch request from the processor";
87 Store, desc="Store request from the processor";
88 L1_Replacement, desc="Replacement";
89
90 // Requests
91 Own_GETX, desc="We observe our own GetX forwarded back to us";
92 Fwd_GETX, desc="A GetX from another processor";
93 Fwd_GETS, desc="A GetS from another processor";
94 Fwd_DMA, desc="A forwarded DMA read request";
95 Inv, desc="Invalidations from the directory";
96
97 // Responses
98 Ack, desc="Received an ack message";
99 Data, desc="Received a data message, responder has a shared copy";
100 Exclusive_Data, desc="Received an exclusive data message";
101
102 Writeback_Ack, desc="Writeback O.K. from directory";
103 Writeback_Ack_Data, desc="Writeback O.K. from directory, data requested";
104 Writeback_Nack, desc="Writeback not O.K. from directory";
105
106 // Triggers
107 All_acks, desc="Received all required data and message acks";
108
109 // Timeouts
110 Use_Timeout, desc="lockout period ended";
111 }
112
113 // TYPES
114
115 // CacheEntry
116 structure(Entry, desc="...", interface="AbstractCacheEntry") {
117 State CacheState, desc="cache state";
118 bool Dirty, desc="Is the data dirty (different than memory)?";
119 DataBlock DataBlk, desc="data for the block";
120 }
121
122 // TBE fields
123 structure(TBE, desc="...") {
124 Address Address, desc="Physical address for this TBE";
125 State TBEState, desc="Transient state";
126 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
127 bool Dirty, desc="Is the data dirty (different than memory)?";
128 int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
129 }
130
131 structure(TBETable, external="yes") {
132 TBE lookup(Address);
133 void allocate(Address);
134 void deallocate(Address);
135 bool isPresent(Address);
136 }
137
138 void set_cache_entry(AbstractCacheEntry b);
139 void unset_cache_entry();
140 void set_tbe(TBE b);
141 void unset_tbe();
142
143 MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
144
145 TBETable TBEs, template_hack="<L1Cache_TBE>";
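// The use timer keeps a freshly filled block in M_W/MM_W for a short window
// during which forwarded requests and invalidations are recycled rather than
// serviced (see o_scheduleUseTimeout and the M_W/MM_W transitions), so the
// requesting processor can use the line before giving it up.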
146 TimerTable useTimerTable;
147 int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
148
149 Entry getCacheEntry(Address addr), return_by_pointer="yes" {
150 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
151 if(is_valid(L1Dcache_entry)) {
152 return L1Dcache_entry;
153 }
154
155 Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
156 return L1Icache_entry;
157 }
158
159 Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
160 return static_cast(Entry, "pointer", L1DcacheMemory.lookup(addr));
161 }
162
163 Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
164 return static_cast(Entry, "pointer", L1IcacheMemory.lookup(addr));
165 }
166
167 State getState(TBE tbe, Entry cache_entry, Address addr) {
168 if(is_valid(tbe)) {
169 return tbe.TBEState;
170 } else if (is_valid(cache_entry)) {
171 return cache_entry.CacheState;
172 }
173 return State:I;
174 }
175
176 void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
177 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
178
179 if (is_valid(tbe)) {
180 tbe.TBEState := state;
181 }
182
183 if (is_valid(cache_entry)) {
184 if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
185 ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
186 ((cache_entry.CacheState != State:S) && (state == State:S)) ||
187 ((cache_entry.CacheState != State:O) && (state == State:O)) ) {
188
189 cache_entry.CacheState := state;
190 sequencer.checkCoherence(addr);
191 }
192 else {
193 cache_entry.CacheState := state;
194 }
195 }
196 }
197
198 AccessPermission getAccessPermission(Address addr) {
199 TBE tbe := TBEs[addr];
200 if(is_valid(tbe)) {
201 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
202 return L1Cache_State_to_permission(tbe.TBEState);
203 }
204
205 Entry cache_entry := getCacheEntry(addr);
206 if(is_valid(cache_entry)) {
207 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
208 return L1Cache_State_to_permission(cache_entry.CacheState);
209 }
210
211 DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
212 return AccessPermission:NotPresent;
213 }
214
215 void setAccessPermission(Entry cache_entry, Address addr, State state) {
216 if (is_valid(cache_entry)) {
217 cache_entry.changePermission(L1Cache_State_to_permission(state));
218 }
219 }
220
221 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
222 return getCacheEntry(addr).DataBlk;
223 }
224
225 Event mandatory_request_type_to_event(RubyRequestType type) {
226 if (type == RubyRequestType:LD) {
227 return Event:Load;
228 } else if (type == RubyRequestType:IFETCH) {
229 return Event:Ifetch;
230 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
231 return Event:Store;
232 } else {
233 error("Invalid RubyRequestType");
234 }
235 }
236
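// Internal trigger queue: o_checkForCompletion enqueues an ALL_ACKS message
// here once NumPendingMsgs reaches zero, which fires the All_acks event.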
237 MessageBuffer triggerQueue, ordered="true";
238
239 // ** OUT_PORTS **
240
241 out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
242 out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
243 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
244
245 // ** IN_PORTS **
246
247 // Use Timer
248 in_port(useTimerTable_in, Address, useTimerTable) {
249 if (useTimerTable_in.isReady()) {
250 trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
251 getCacheEntry(useTimerTable.readyAddress()),
252 TBEs[useTimerTable.readyAddress()]);
253 }
254 }
255
256 // Trigger Queue
257 in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
258 if (triggerQueue_in.isReady()) {
259 peek(triggerQueue_in, TriggerMsg) {
260 if (in_msg.Type == TriggerType:ALL_ACKS) {
261 trigger(Event:All_acks, in_msg.Address,
262 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
263 } else {
264 error("Unexpected message");
265 }
266 }
267 }
268 }
269
270 // Nothing from the request network
271
272 // Request Network
273 in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
274 if (requestNetwork_in.isReady()) {
275 peek(requestNetwork_in, RequestMsg, block_on="Address") {
276 assert(in_msg.Destination.isElement(machineID));
277 DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
278
279 if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
280 if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
281 trigger(Event:Own_GETX, in_msg.Address,
282 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
283 } else {
284 trigger(Event:Fwd_GETX, in_msg.Address,
285 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
286 }
287 } else if (in_msg.Type == CoherenceRequestType:GETS) {
288 trigger(Event:Fwd_GETS, in_msg.Address,
289 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
290 } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
291 trigger(Event:Fwd_DMA, in_msg.Address,
292 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
293 } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
294 trigger(Event:Writeback_Ack, in_msg.Address,
295 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
296 } else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
297 trigger(Event:Writeback_Ack_Data, in_msg.Address,
298 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
299 } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
300 trigger(Event:Writeback_Nack, in_msg.Address,
301 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
302 } else if (in_msg.Type == CoherenceRequestType:INV) {
303 trigger(Event:Inv, in_msg.Address,
304 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
305 } else {
306 error("Unexpected message");
307 }
308 }
309 }
310 }
311
312 // Response Network
313 in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
314 if (responseToL1Cache_in.isReady()) {
315 peek(responseToL1Cache_in, ResponseMsg, block_on="Address") {
316 if (in_msg.Type == CoherenceResponseType:ACK) {
317 trigger(Event:Ack, in_msg.Address,
318 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
319 } else if (in_msg.Type == CoherenceResponseType:DATA) {
320 trigger(Event:Data, in_msg.Address,
321 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
322 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
323 trigger(Event:Exclusive_Data, in_msg.Address,
324 getCacheEntry(in_msg.Address), TBEs[in_msg.Address]);
325 } else {
326 error("Unexpected message");
327 }
328 }
329 }
330 }
331
332 // Nothing from the unblock network
333 // Mandatory Queue between the node's CPU and its L1 caches
334 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
335 if (mandatoryQueue_in.isReady()) {
336 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
337
338 // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
339
340 if (in_msg.Type == RubyRequestType:IFETCH) {
341 // ** INSTRUCTION ACCESS ***
342
343 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
344 if (is_valid(L1Icache_entry)) {
345 // The tag matches for the L1, so the L1 asks the L2 for it.
346 trigger(mandatory_request_type_to_event(in_msg.Type),
347 in_msg.LineAddress, L1Icache_entry,
348 TBEs[in_msg.LineAddress]);
349 } else {
350
351 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
352 // Check to see if it is in the OTHER L1
353 if (is_valid(L1Dcache_entry)) {
354 // The block is in the wrong L1; replace it there (writing it back to the shared L2) before allocating here
355 trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
356 TBEs[in_msg.LineAddress]);
357 }
358 if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
359 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
360 trigger(mandatory_request_type_to_event(in_msg.Type),
361 in_msg.LineAddress, L1Icache_entry,
362 TBEs[in_msg.LineAddress]);
363 } else {
364 // No room in the L1, so we need to make room in the L1
365 trigger(Event:L1_Replacement,
366 L1IcacheMemory.cacheProbe(in_msg.LineAddress),
367 getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
368 TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
369 }
370 }
371 } else {
372 // *** DATA ACCESS ***
373
374 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
375 if (is_valid(L1Dcache_entry)) {
376 // The tag matches for the L1, so the L1 asks the L2 for it
377 trigger(mandatory_request_type_to_event(in_msg.Type),
378 in_msg.LineAddress, L1Dcache_entry,
379 TBEs[in_msg.LineAddress]);
380 } else {
381
382 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
383 // Check to see if it is in the OTHER L1
384 if (is_valid(L1Icache_entry)) {
385 // The block is in the wrong L1; replace it there (writing it back to the shared L2) before allocating here
386 trigger(Event:L1_Replacement, in_msg.LineAddress,
387 L1Icache_entry, TBEs[in_msg.LineAddress]);
388 }
389 if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
390 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
391 trigger(mandatory_request_type_to_event(in_msg.Type),
392 in_msg.LineAddress, L1Dcache_entry,
393 TBEs[in_msg.LineAddress]);
394 } else {
395 // No room in the L1, so we need to make room in the L1
396 trigger(Event:L1_Replacement,
397 L1DcacheMemory.cacheProbe(in_msg.LineAddress),
398 getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
399 TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
400 }
401 }
402 }
403 }
404 }
405 }
406
407
408 // ACTIONS
409
410 action(a_issueGETS, "a", desc="Issue GETS") {
411 peek(mandatoryQueue_in, RubyRequest) {
412 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
413 out_msg.Address := address;
414 out_msg.Type := CoherenceRequestType:GETS;
415 out_msg.Requestor := machineID;
416 out_msg.RequestorMachine := MachineType:L1Cache;
417 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
418 l2_select_low_bit, l2_select_num_bits));
419 out_msg.MessageSize := MessageSizeType:Request_Control;
420 out_msg.AccessMode := in_msg.AccessMode;
421 out_msg.Prefetch := in_msg.Prefetch;
422 }
423 }
424 }
425
426 action(b_issueGETX, "b", desc="Issue GETX") {
427 peek(mandatoryQueue_in, RubyRequest) {
428 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
429 out_msg.Address := address;
430 out_msg.Type := CoherenceRequestType:GETX;
431 out_msg.Requestor := machineID;
432 out_msg.RequestorMachine := MachineType:L1Cache;
433 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
434 l2_select_low_bit, l2_select_num_bits));
435 out_msg.MessageSize := MessageSizeType:Request_Control;
436 out_msg.AccessMode := in_msg.AccessMode;
437 out_msg.Prefetch := in_msg.Prefetch;
438 }
439 }
440 }
441
442 action(d_issuePUTX, "d", desc="Issue PUTX") {
443 // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
444 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
445 out_msg.Address := address;
446 out_msg.Type := CoherenceRequestType:PUTX;
447 out_msg.Requestor := machineID;
448 out_msg.RequestorMachine := MachineType:L1Cache;
449 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
450 l2_select_low_bit, l2_select_num_bits));
451 out_msg.MessageSize := MessageSizeType:Writeback_Control;
452 }
453 }
454
455 action(dd_issuePUTO, "\d", desc="Issue PUTO") {
456 // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
457 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
458 out_msg.Address := address;
459 out_msg.Type := CoherenceRequestType:PUTO;
460 out_msg.Requestor := machineID;
461 out_msg.RequestorMachine := MachineType:L1Cache;
462 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
463 l2_select_low_bit, l2_select_num_bits));
464 out_msg.MessageSize := MessageSizeType:Writeback_Control;
465 }
466 }
467
468 action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
469 // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
470 enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
471 out_msg.Address := address;
472 out_msg.Type := CoherenceRequestType:PUTS;
473 out_msg.Requestor := machineID;
474 out_msg.RequestorMachine := MachineType:L1Cache;
475 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
476 l2_select_low_bit, l2_select_num_bits));
477 out_msg.MessageSize := MessageSizeType:Writeback_Control;
478 }
479 }
480
481 action(e_sendData, "e", desc="Send data from cache to requestor") {
482 peek(requestNetwork_in, RequestMsg) {
483 assert(is_valid(cache_entry));
484 if (in_msg.RequestorMachine == MachineType:L2Cache) {
485 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
486 out_msg.Address := address;
487 out_msg.Type := CoherenceResponseType:DATA;
488 out_msg.Sender := machineID;
489 out_msg.SenderMachine := MachineType:L1Cache;
490 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
491 l2_select_low_bit, l2_select_num_bits));
492 out_msg.DataBlk := cache_entry.DataBlk;
493 // out_msg.Dirty := cache_entry.Dirty;
494 out_msg.Dirty := false;
495 out_msg.Acks := in_msg.Acks;
496 out_msg.MessageSize := MessageSizeType:Response_Data;
497 }
498 DPRINTF(RubySlicc, "Sending data to L2: %s\n", in_msg.Address);
499 }
500 else {
501 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
502 out_msg.Address := address;
503 out_msg.Type := CoherenceResponseType:DATA;
504 out_msg.Sender := machineID;
505 out_msg.SenderMachine := MachineType:L1Cache;
506 out_msg.Destination.add(in_msg.Requestor);
507 out_msg.DataBlk := cache_entry.DataBlk;
508 // out_msg.Dirty := cache_entry.Dirty;
509 out_msg.Dirty := false;
510 out_msg.Acks := in_msg.Acks;
511 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
512 }
513 DPRINTF(RubySlicc, "Sending data to L1\n");
514 }
515 }
516 }
517
518 action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
519 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
520 assert(is_valid(cache_entry));
521 out_msg.Address := address;
522 out_msg.Type := CoherenceResponseType:DATA;
523 out_msg.Sender := machineID;
524 out_msg.SenderMachine := MachineType:L1Cache;
525 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
526 l2_select_low_bit, l2_select_num_bits));
527 out_msg.DataBlk := cache_entry.DataBlk;
528 out_msg.Dirty := cache_entry.Dirty;
529 out_msg.Acks := 0; // irrelevant
530 out_msg.MessageSize := MessageSizeType:Response_Data;
531 }
532 }
533
534 action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
535 peek(requestNetwork_in, RequestMsg) {
536 assert(is_valid(cache_entry));
537 if (in_msg.RequestorMachine == MachineType:L2Cache) {
538 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
539 out_msg.Address := address;
540 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
541 out_msg.Sender := machineID;
542 out_msg.SenderMachine := MachineType:L1Cache;
543 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
544 l2_select_low_bit, l2_select_num_bits));
545 out_msg.DataBlk := cache_entry.DataBlk;
546 out_msg.Dirty := cache_entry.Dirty;
547 out_msg.Acks := in_msg.Acks;
548 out_msg.MessageSize := MessageSizeType:Response_Data;
549 }
550 DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
551 }
552 else {
553 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
554 out_msg.Address := address;
555 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
556 out_msg.Sender := machineID;
557 out_msg.SenderMachine := MachineType:L1Cache;
558 out_msg.Destination.add(in_msg.Requestor);
559 out_msg.DataBlk := cache_entry.DataBlk;
560 out_msg.Dirty := cache_entry.Dirty;
561 out_msg.Acks := in_msg.Acks;
562 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
563 }
564 DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
565 }
566 }
567 }
568
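// Ack counting convention: an invalidated sharer replies with Acks = -1; these
// are summed into tbe.NumPendingMsgs together with the Acks count carried by
// data responses and the observed Own_GETX, and All_acks fires once the sum
// reaches zero.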
569 action(f_sendAck, "f", desc="Send ack from cache to requestor") {
570 peek(requestNetwork_in, RequestMsg) {
571 if (in_msg.RequestorMachine == MachineType:L1Cache) {
572 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
573 out_msg.Address := address;
574 out_msg.Type := CoherenceResponseType:ACK;
575 out_msg.Sender := machineID;
576 out_msg.SenderMachine := MachineType:L1Cache;
577 out_msg.Destination.add(in_msg.Requestor);
578 out_msg.Acks := 0 - 1; // -1
579 out_msg.MessageSize := MessageSizeType:Response_Control;
580 }
581 }
582 else {
583 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
584 out_msg.Address := address;
585 out_msg.Type := CoherenceResponseType:ACK;
586 out_msg.Sender := machineID;
587 out_msg.SenderMachine := MachineType:L1Cache;
588 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
589 l2_select_low_bit, l2_select_num_bits));
590 out_msg.Acks := 0 - 1; // -1
591 out_msg.MessageSize := MessageSizeType:Response_Control;
592 }
593 }
594 }
595 }
596
597 action(g_sendUnblock, "g", desc="Send unblock to memory") {
598 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
599 out_msg.Address := address;
600 out_msg.Type := CoherenceResponseType:UNBLOCK;
601 out_msg.Sender := machineID;
602 out_msg.SenderMachine := MachineType:L1Cache;
603 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
604 l2_select_low_bit, l2_select_num_bits));
605 out_msg.MessageSize := MessageSizeType:Unblock_Control;
606 }
607 }
608
609 action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
610 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
611 out_msg.Address := address;
612 out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
613 out_msg.Sender := machineID;
614 out_msg.SenderMachine := MachineType:L1Cache;
615 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
616 l2_select_low_bit, l2_select_num_bits));
617 out_msg.MessageSize := MessageSizeType:Unblock_Control;
618 }
619 }
620
621 action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
622 assert(is_valid(cache_entry));
623 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
624 sequencer.readCallback(address, cache_entry.DataBlk);
625 }
626
627 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
628 assert(is_valid(cache_entry));
629 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
630 sequencer.writeCallback(address, cache_entry.DataBlk);
631 cache_entry.Dirty := true;
632 }
633
634 action(i_allocateTBE, "i", desc="Allocate TBE") {
635 check_allocate(TBEs);
636 TBEs.allocate(address);
637 set_tbe(TBEs[address]);
638 assert(is_valid(cache_entry));
639 tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
640 tbe.Dirty := cache_entry.Dirty;
641 }
642
643 action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
644 triggerQueue_in.dequeue();
645 }
646
647 action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
648 useTimerTable.unset(address);
649 }
650
651 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
652 mandatoryQueue_in.dequeue();
653 }
654
655 action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
656 requestNetwork_in.dequeue();
657 }
658
659 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
660 peek(responseToL1Cache_in, ResponseMsg) {
661 assert(is_valid(tbe));
662 DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
663 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
664 }
665 }
666
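// Same as m_decrementNumberOfMessages, but peeks the forwarded-request network;
// used when our own GETX, which carries the ack count, is observed (Own_GETX).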
667 action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
668 peek(requestNetwork_in, RequestMsg) {
669 assert(is_valid(tbe));
670 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
671 }
672 }
673
674 action(n_popResponseQueue, "n", desc="Pop response queue") {
675 responseToL1Cache_in.dequeue();
676 }
677
678 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
679 assert(is_valid(tbe));
680 if (tbe.NumPendingMsgs == 0) {
681 enqueue(triggerQueue_out, TriggerMsg) {
682 out_msg.Address := address;
683 out_msg.Type := TriggerType:ALL_ACKS;
684 }
685 }
686 }
687
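// Note: the use-timeout length below is hard-coded to 50 cycles rather than
// exposed as a configuration parameter.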
688 action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
689 useTimerTable.set(address, 50);
690 }
691
692 action(ub_dmaUnblockL2Cache, "ub", desc="Send DMA ack to L2 cache") {
693 peek(requestNetwork_in, RequestMsg) {
694 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
695 out_msg.Address := address;
696 out_msg.Type := CoherenceResponseType:DMA_ACK;
697 out_msg.Sender := machineID;
698 out_msg.SenderMachine := MachineType:L1Cache;
699 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
700 l2_select_low_bit, l2_select_num_bits));
701 out_msg.Dirty := false;
702 out_msg.Acks := 1;
703 out_msg.MessageSize := MessageSizeType:Response_Control;
704 }
705 }
706 }
707
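// While a writeback is in flight (MI/OI/SI), the cache block has already been
// deallocated, so forwarded requests are answered from the copy saved in the TBE.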
708 action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
709 peek(requestNetwork_in, RequestMsg) {
710 assert(is_valid(tbe));
711 if (in_msg.RequestorMachine == MachineType:L1Cache ||
712 in_msg.RequestorMachine == MachineType:DMA) {
713 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
714 out_msg.Address := address;
715 out_msg.Type := CoherenceResponseType:DATA;
716 out_msg.Sender := machineID;
717 out_msg.SenderMachine := MachineType:L1Cache;
718 out_msg.Destination.add(in_msg.Requestor);
719 out_msg.DataBlk := tbe.DataBlk;
720 // out_msg.Dirty := tbe.Dirty;
721 out_msg.Dirty := false;
722 out_msg.Acks := in_msg.Acks;
723 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
724 }
725 }
726 else {
727 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
728 out_msg.Address := address;
729 out_msg.Type := CoherenceResponseType:DATA;
730 out_msg.Sender := machineID;
731 out_msg.SenderMachine := MachineType:L1Cache;
732 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
733 l2_select_low_bit, l2_select_num_bits));
734 out_msg.DataBlk := tbe.DataBlk;
735 // out_msg.Dirty := tbe.Dirty;
736 out_msg.Dirty := false;
737 out_msg.Acks := in_msg.Acks;
738 out_msg.MessageSize := MessageSizeType:Response_Data;
739 }
740 }
741 }
742 }
743
744 action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
745 peek(requestNetwork_in, RequestMsg) {
746 assert(is_valid(tbe));
747 if (in_msg.RequestorMachine == MachineType:L1Cache) {
748 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
749 out_msg.Address := address;
750 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
751 out_msg.Sender := machineID;
752 out_msg.SenderMachine := MachineType:L1Cache;
753 out_msg.Destination.add(in_msg.Requestor);
754 out_msg.DataBlk := tbe.DataBlk;
755 out_msg.Dirty := tbe.Dirty;
756 out_msg.Acks := in_msg.Acks;
757 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
758 }
759 }
760 else {
761 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
762 out_msg.Address := address;
763 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
764 out_msg.Sender := machineID;
765 out_msg.SenderMachine := MachineType:L1Cache;
766 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
767 l2_select_low_bit, l2_select_num_bits));
768 out_msg.DataBlk := tbe.DataBlk;
769 out_msg.Dirty := tbe.Dirty;
770 out_msg.Acks := in_msg.Acks;
771 out_msg.MessageSize := MessageSizeType:Response_Data;
772 }
773 }
774 }
775 }
776
777 // L2 will usually request data for a writeback
778 action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
779 enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
780 assert(is_valid(tbe));
781 out_msg.Address := address;
782 out_msg.Sender := machineID;
783 out_msg.SenderMachine := MachineType:L1Cache;
784 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
785 l2_select_low_bit, l2_select_num_bits));
786 out_msg.Dirty := tbe.Dirty;
787 if (tbe.Dirty) {
788 out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
789 } else {
790 out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN_DATA;
791 }
792 out_msg.DataBlk := tbe.DataBlk;
793 out_msg.MessageSize := MessageSizeType:Writeback_Data;
794 }
795 }
796
797 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
798 TBEs.deallocate(address);
799 unset_tbe();
800 }
801
802 action(u_writeDataToCache, "u", desc="Write data to cache") {
803 peek(responseToL1Cache_in, ResponseMsg) {
804 assert(is_valid(cache_entry));
805 cache_entry.DataBlk := in_msg.DataBlk;
806 cache_entry.Dirty := in_msg.Dirty;
807
808 if (in_msg.Type == CoherenceResponseType:DATA) {
809 //assert(in_msg.Dirty == false);
810 }
811 }
812 }
813
814 action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
815 peek(responseToL1Cache_in, ResponseMsg) {
816 assert(is_valid(cache_entry));
817 assert(cache_entry.DataBlk == in_msg.DataBlk);
818 cache_entry.DataBlk := in_msg.DataBlk;
819 cache_entry.Dirty := in_msg.Dirty;
820 }
821 }
822
823 action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
824 if (L1DcacheMemory.isTagPresent(address)) {
825 L1DcacheMemory.deallocate(address);
826 } else {
827 L1IcacheMemory.deallocate(address);
828 }
829 unset_cache_entry();
830 }
831
832 action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
833 if ((is_invalid(cache_entry))) {
834 set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
835 }
836 }
837
838 action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
839 if ((is_invalid(cache_entry))) {
840 set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
841 }
842 }
843
844 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
845 if (send_evictions) {
846 DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
847 sequencer.evictionCallback(address);
848 }
849 }
850
851 action(uu_profileMiss, "\u", desc="Profile the demand miss") {
852 peek(mandatoryQueue_in, RubyRequest) {
853 // profile_miss(in_msg);
854 }
855 }
856
857 action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
858 requestNetwork_in.recycle();
859 }
860
861 action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
862 mandatoryQueue_in.recycle();
863 }
864
865 //*****************************************************
866 // TRANSITIONS
867 //*****************************************************
868
869 // Transitions for Load/Store/L2_Replacement from transient states
870 transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
871 zz_recycleMandatoryQueue;
872 }
873
874 transition({M_W, MM_W}, L1_Replacement) {
875 zz_recycleMandatoryQueue;
876 }
877
878 transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
879 z_recycleRequestQueue;
880 }
881
882 transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
883 zz_recycleMandatoryQueue;
884 }
885
886 // Transitions from Idle
887 transition(I, Load, IS) {
888 ii_allocateL1DCacheBlock;
889 i_allocateTBE;
890 a_issueGETS;
891 // uu_profileMiss;
892 k_popMandatoryQueue;
893 }
894
895 transition(I, Ifetch, IS) {
896 jj_allocateL1ICacheBlock;
897 i_allocateTBE;
898 a_issueGETS;
899 // uu_profileMiss;
900 k_popMandatoryQueue;
901 }
902
903 transition(I, Store, IM) {
904 ii_allocateL1DCacheBlock;
905 i_allocateTBE;
906 b_issueGETX;
907 // uu_profileMiss;
908 k_popMandatoryQueue;
909 }
910
911 transition(I, L1_Replacement) {
912 kk_deallocateL1CacheBlock;
913 }
914
915 transition(I, Inv) {
916 f_sendAck;
917 l_popForwardQueue;
918 }
919
920 // Transitions from Shared
921 transition({S, SM}, {Load, Ifetch}) {
922 h_load_hit;
923 k_popMandatoryQueue;
924 }
925
926 transition(S, Store, SM) {
927 i_allocateTBE;
928 b_issueGETX;
929 // uu_profileMiss;
930 k_popMandatoryQueue;
931 }
932
933 transition(S, L1_Replacement, SI) {
934 i_allocateTBE;
935 dd_issuePUTS;
936 forward_eviction_to_cpu;
937 kk_deallocateL1CacheBlock;
938 }
939
940 transition(S, Inv, I) {
941 f_sendAck;
942 forward_eviction_to_cpu;
943 l_popForwardQueue;
944 }
945
946 transition(S, Fwd_GETS) {
947 e_sendData;
948 l_popForwardQueue;
949 }
950
951 transition(S, Fwd_DMA) {
952 e_sendData;
953 ub_dmaUnblockL2Cache;
954 l_popForwardQueue;
955 }
956
957 // Transitions from Owned
958 transition({O, OM}, {Load, Ifetch}) {
959 h_load_hit;
960 k_popMandatoryQueue;
961 }
962
963 transition(O, Store, OM) {
964 i_allocateTBE;
965 b_issueGETX;
966 // uu_profileMiss;
967 k_popMandatoryQueue;
968 }
969
970 transition(O, L1_Replacement, OI) {
971 i_allocateTBE;
972 dd_issuePUTO;
973 forward_eviction_to_cpu;
974 kk_deallocateL1CacheBlock;
975 }
976
977 transition(O, Fwd_GETX, I) {
978 ee_sendDataExclusive;
979 forward_eviction_to_cpu;
980 l_popForwardQueue;
981 }
982
983 transition(O, Fwd_GETS) {
984 e_sendData;
985 l_popForwardQueue;
986 }
987
988 transition(O, Fwd_DMA) {
989 e_sendData;
990 ub_dmaUnblockL2Cache;
991 l_popForwardQueue;
992 }
993
994 // Transitions from MM
995 transition({MM, MM_W}, {Load, Ifetch}) {
996 h_load_hit;
997 k_popMandatoryQueue;
998 }
999
1000 transition({MM, MM_W}, Store) {
1001 hh_store_hit;
1002 k_popMandatoryQueue;
1003 }
1004
1005 transition(MM, L1_Replacement, MI) {
1006 i_allocateTBE;
1007 d_issuePUTX;
1008 forward_eviction_to_cpu;
1009 kk_deallocateL1CacheBlock;
1010 }
1011
1012 transition(MM, Fwd_GETX, I) {
1013 ee_sendDataExclusive;
1014 forward_eviction_to_cpu;
1015 l_popForwardQueue;
1016 }
1017
1018 transition(MM, Fwd_GETS, I) {
1019 ee_sendDataExclusive;
1020 forward_eviction_to_cpu;
1021 l_popForwardQueue;
1022 }
1023
1024 transition(MM, Fwd_DMA, MM) {
1025 e_sendData;
1026 ub_dmaUnblockL2Cache;
1027 l_popForwardQueue;
1028 }
1029
1030 // Transitions from M
1031 transition({M, M_W}, {Load, Ifetch}) {
1032 h_load_hit;
1033 k_popMandatoryQueue;
1034 }
1035
1036 transition(M, Store, MM) {
1037 hh_store_hit;
1038 k_popMandatoryQueue;
1039 }
1040
1041 transition(M_W, Store, MM_W) {
1042 hh_store_hit;
1043 k_popMandatoryQueue;
1044 }
1045
1046 transition(M, L1_Replacement, MI) {
1047 i_allocateTBE;
1048 d_issuePUTX;
1049 forward_eviction_to_cpu;
1050 kk_deallocateL1CacheBlock;
1051 }
1052
1053 transition(M, Fwd_GETX, I) {
1054 // e_sendData;
1055 ee_sendDataExclusive;
1056 forward_eviction_to_cpu;
1057 l_popForwardQueue;
1058 }
1059
1060 transition(M, Fwd_GETS, O) {
1061 e_sendData;
1062 l_popForwardQueue;
1063 }
1064
1065 transition(M, Fwd_DMA) {
1066 e_sendData;
1067 ub_dmaUnblockL2Cache;
1068 l_popForwardQueue;
1069 }
1070
1071 // Transitions from IM
1072
1073 transition(IM, Inv) {
1074 f_sendAck;
1075 l_popForwardQueue;
1076 }
1077
1078 transition(IM, Ack) {
1079 m_decrementNumberOfMessages;
1080 o_checkForCompletion;
1081 n_popResponseQueue;
1082 }
1083
1084 transition(IM, {Exclusive_Data, Data}, OM) {
1085 u_writeDataToCache;
1086 m_decrementNumberOfMessages;
1087 o_checkForCompletion;
1088 n_popResponseQueue;
1089 }
1090
1091 // Transitions from SM
1092 transition(SM, Inv, IM) {
1093 f_sendAck;
1094 forward_eviction_to_cpu;
1095 l_popForwardQueue;
1096 }
1097
1098 transition(SM, Ack) {
1099 m_decrementNumberOfMessages;
1100 o_checkForCompletion;
1101 n_popResponseQueue;
1102 }
1103
1104 transition(SM, {Data, Exclusive_Data}, OM) {
1105 // v_writeDataToCacheVerify;
1106 m_decrementNumberOfMessages;
1107 o_checkForCompletion;
1108 n_popResponseQueue;
1109 }
1110
1111 transition(SM, Fwd_GETS) {
1112 e_sendData;
1113 l_popForwardQueue;
1114 }
1115
1116 transition(SM, Fwd_DMA) {
1117 e_sendData;
1118 ub_dmaUnblockL2Cache;
1119 l_popForwardQueue;
1120 }
1121
1122 // Transitions from OM
1123 transition(OM, Own_GETX) {
1124 mm_decrementNumberOfMessages;
1125 o_checkForCompletion;
1126 l_popForwardQueue;
1127 }
1128
1129
1130 // transition(OM, Fwd_GETX, OMF) {
1131 transition(OM, Fwd_GETX, IM) {
1132 ee_sendDataExclusive;
1133 l_popForwardQueue;
1134 }
1135
1136 transition(OM, Fwd_GETS) {
1137 e_sendData;
1138 l_popForwardQueue;
1139 }
1140
1141 transition(OM, Fwd_DMA) {
1142 e_sendData;
1143 ub_dmaUnblockL2Cache;
1144 l_popForwardQueue;
1145 }
1146
1147 //transition({OM, OMF}, Ack) {
1148 transition(OM, Ack) {
1149 m_decrementNumberOfMessages;
1150 o_checkForCompletion;
1151 n_popResponseQueue;
1152 }
1153
1154 transition(OM, All_acks, MM_W) {
1155 hh_store_hit;
1156 gg_sendUnblockExclusive;
1157 s_deallocateTBE;
1158 o_scheduleUseTimeout;
1159 j_popTriggerQueue;
1160 }
1161
1162 transition(MM_W, Use_Timeout, MM) {
1163 jj_unsetUseTimer;
1164 }
1165
1166 // Transitions from IS
1167
1168 transition(IS, Inv) {
1169 f_sendAck;
1170 l_popForwardQueue;
1171 }
1172
1173 transition(IS, Data, S) {
1174 u_writeDataToCache;
1175 m_decrementNumberOfMessages;
1176 h_load_hit;
1177 g_sendUnblock;
1178 s_deallocateTBE;
1179 n_popResponseQueue;
1180 }
1181
1182 transition(IS, Exclusive_Data, M_W) {
1183 u_writeDataToCache;
1184 m_decrementNumberOfMessages;
1185 h_load_hit;
1186 gg_sendUnblockExclusive;
1187 o_scheduleUseTimeout;
1188 s_deallocateTBE;
1189 n_popResponseQueue;
1190 }
1191
1192 transition(M_W, Use_Timeout, M) {
1193 jj_unsetUseTimer;
1194 }
1195
1196 // Transitions from OI/MI
1197
1198 transition(MI, Fwd_GETS, OI) {
1199 q_sendDataFromTBEToCache;
1200 l_popForwardQueue;
1201 }
1202
1203 transition(MI, Fwd_DMA) {
1204 q_sendDataFromTBEToCache;
1205 ub_dmaUnblockL2Cache;
1206 l_popForwardQueue;
1207 }
1208
1209 transition(MI, Fwd_GETX, II) {
1210 q_sendExclusiveDataFromTBEToCache;
1211 l_popForwardQueue;
1212 }
1213
1214 transition({SI, OI}, Fwd_GETS) {
1215 q_sendDataFromTBEToCache;
1216 l_popForwardQueue;
1217 }
1218
1219 transition({SI, OI}, Fwd_DMA) {
1220 q_sendDataFromTBEToCache;
1221 ub_dmaUnblockL2Cache;
1222 l_popForwardQueue;
1223 }
1224
1225 transition(OI, Fwd_GETX, II) {
1226 q_sendExclusiveDataFromTBEToCache;
1227 l_popForwardQueue;
1228 }
1229
1230 transition({SI, OI, MI}, Writeback_Ack_Data, I) {
1231 qq_sendWBDataFromTBEToL2; // always send data
1232 s_deallocateTBE;
1233 l_popForwardQueue;
1234 }
1235
1236 transition({SI, OI, MI}, Writeback_Ack, I) {
1237 g_sendUnblock;
1238 s_deallocateTBE;
1239 l_popForwardQueue;
1240 }
1241
1242 transition({MI, OI}, Writeback_Nack, OI) {
1243 // FIXME: This might cause deadlock by re-using the writeback
1244 // channel, we should handle this case differently.
1245 dd_issuePUTO;
1246 l_popForwardQueue;
1247 }
1248
1249 // Transitions from II
1250 transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
1251 g_sendUnblock;
1252 s_deallocateTBE;
1253 l_popForwardQueue;
1254 }
1255
1256 // transition({II, SI}, Writeback_Nack, I) {
1257 transition(II, Writeback_Nack, I) {
1258 s_deallocateTBE;
1259 l_popForwardQueue;
1260 }
1261
1262 transition(SI, Writeback_Nack) {
1263 dd_issuePUTS;
1264 l_popForwardQueue;
1265 }
1266
1267 transition(II, Inv) {
1268 f_sendAck;
1269 l_popForwardQueue;
1270 }
1271
1272 transition(SI, Inv, II) {
1273 f_sendAck;
1274 l_popForwardQueue;
1275 }
1276 }