mem-ruby: fix MOESI_CMP_directory functional reads
src/mem/ruby/protocol/MOESI_CMP_directory-L1cache.sm
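The listing below shows the L1 controller after this change: functionalRead serves a functional access from the cache entry when one is valid and otherwise falls back to the TBE's DataBlk, which holds the block while a writeback is in flight; only when neither exists does it error. A condensed sketch of that lookup order, mirroring the functionalRead handler defined later in this file (getCacheEntry, TBEs and testAndRead are the helpers the protocol already declares):

  // Serve functional accesses from the cache entry if present,
  // otherwise from the TBE kept for a concurrent writeback.
  Entry cache_entry := getCacheEntry(addr);
  if (is_valid(cache_entry)) {
    testAndRead(addr, cache_entry.DataBlk, pkt);
  } else {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      error("Data block missing!");
    }
  }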
1 /*
2 * Copyright (c) 2019 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41 machine(MachineType:L1Cache, "L1 cache protocol")
42 : Sequencer * sequencer;
43 CacheMemory * L1Icache;
44 CacheMemory * L1Dcache;
45 Cycles request_latency := 1;
46 Cycles response_latency := 1;
47 Cycles use_timeout_latency := 50;
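// When true, L1 invalidations and replacements are reported back to the
// core via sequencer.evictionCallback (see forward_eviction_to_cpu below).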
48 bool send_evictions;
49
50 // Message Queues
51 // From this node's L1 cache TO the network
52 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
53 MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
54 vnet_type="request";
55 // a local L1 -> this L2 bank
56 MessageBuffer * responseFromL1Cache, network="To", virtual_network="2",
57 vnet_type="response";
58
59 // To this node's L1 cache FROM the network
60 // an L2 bank -> this L1
61 MessageBuffer * requestToL1Cache, network="From", virtual_network="0",
62 vnet_type="request";
63 // an L2 bank -> this L1
64 MessageBuffer * responseToL1Cache, network="From", virtual_network="2",
65 vnet_type="response";
66
67 MessageBuffer * triggerQueue;
68
69 MessageBuffer * mandatoryQueue;
70 {
71 // STATES
72 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
73 // Base states
74 I, AccessPermission:Invalid, desc="Idle";
75 S, AccessPermission:Read_Only, desc="Shared";
76 O, AccessPermission:Read_Only, desc="Owned";
77 M, AccessPermission:Read_Write, desc="Modified (dirty)";
78 M_W, AccessPermission:Read_Write, desc="Modified (dirty)";
79 MM, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
80 MM_W, AccessPermission:Read_Write, desc="Modified (dirty and locally modified)";
81
82 // Transient States
83 // Notice we still have a valid copy of the block in most states
84 IM, AccessPermission:Busy, "IM", desc="Issued GetX";
85 IS, AccessPermission:Busy, "IS", desc="Issued GetS";
86 SM, AccessPermission:Read_Only, "SM", desc="Issued GetX, we still have an old copy of the line";
87 OM, AccessPermission:Read_Only, "OM", desc="Issued GetX, received data";
88 SI, AccessPermission:Read_Only, "SI", desc="Issued PutS, waiting for ack";
89 OI, AccessPermission:Read_Only, "OI", desc="Issued PutO, waiting for ack";
90 MI, AccessPermission:Read_Write, "MI", desc="Issued PutX, waiting for ack";
91 II, AccessPermission:Busy, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
92 }
93
94 // EVENTS
95 enumeration(Event, desc="Cache events") {
96 Load, desc="Load request from the processor";
97 Ifetch, desc="I-fetch request from the processor";
98 Store, desc="Store request from the processor";
99 L1_Replacement, desc="Replacement";
100
101 // Requests
102 Own_GETX, desc="We observe our own GetX forwarded back to us";
103 Fwd_GETX, desc="A GetX from another processor";
104 Fwd_GETS, desc="A GetS from another processor";
105 Fwd_DMA, desc="A forwarded DMA read request";
106 Inv, desc="Invalidations from the directory";
107
108 // Responses
109 Ack, desc="Received an ack message";
110 Data, desc="Received a data message, responder has a shared copy";
111 Exclusive_Data, desc="Received an exclusive data message";
112
113 Writeback_Ack, desc="Writeback O.K. from directory";
114 Writeback_Ack_Data, desc="Writeback O.K. from directory, send the data";
115 Writeback_Nack, desc="Writeback not O.K. from directory";
116
117 // Triggers
118 All_acks, desc="Received all required data and message acks";
119
120 // Timeouts
121 Use_Timeout, desc="lockout period ended";
122 }
123
124 // TYPES
125
126 // CacheEntry
127 structure(Entry, desc="...", interface="AbstractCacheEntry") {
128 State CacheState, desc="cache state";
129 bool Dirty, desc="Is the data dirty (different than memory)?";
130 DataBlock DataBlk, desc="data for the block";
131 }
132
133 // TBE fields
134 structure(TBE, desc="...") {
135 Addr addr, desc="Physical address for this TBE";
136 State TBEState, desc="Transient state";
137 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
138 bool Dirty, desc="Is the data dirty (different than memory)?";
139 int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
140 }
141
142 structure(TBETable, external="yes") {
143 TBE lookup(Addr);
144 void allocate(Addr);
145 void deallocate(Addr);
146 bool isPresent(Addr);
147 }
148
149 Tick clockEdge();
150 Tick cyclesToTicks(Cycles c);
151 void set_cache_entry(AbstractCacheEntry b);
152 void unset_cache_entry();
153 void set_tbe(TBE b);
154 void unset_tbe();
155 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
156
157 TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
158 TimerTable useTimerTable;
159
160 Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
161 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
162 if(is_valid(L1Dcache_entry)) {
163 return L1Dcache_entry;
164 }
165
166 Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
167 return L1Icache_entry;
168 }
169
170 Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
171 return static_cast(Entry, "pointer", L1Dcache.lookup(addr));
172 }
173
174 Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
175 return static_cast(Entry, "pointer", L1Icache.lookup(addr));
176 }
177
178 State getState(TBE tbe, Entry cache_entry, Addr addr) {
179 if(is_valid(tbe)) {
180 return tbe.TBEState;
181 } else if (is_valid(cache_entry)) {
182 return cache_entry.CacheState;
183 }
184 return State:I;
185 }
186
187 // L1 hit latency
188 Cycles mandatoryQueueLatency(RubyRequestType type) {
189 if (type == RubyRequestType:IFETCH) {
190 return L1Icache.getTagLatency();
191 } else {
192 return L1Dcache.getTagLatency();
193 }
194 }
195
196 // Latency for responses that fetch data from cache
197 Cycles cacheResponseLatency() {
198 if (L1Dcache.getTagLatency() > response_latency) {
199 return L1Dcache.getTagLatency();
200 } else {
201 return response_latency;
202 }
203 }
204
205 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
206 assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
207
208 if (is_valid(tbe)) {
209 tbe.TBEState := state;
210 }
211
212 if (is_valid(cache_entry)) {
213 if ( ((cache_entry.CacheState != State:M) && (state == State:M)) ||
214 ((cache_entry.CacheState != State:MM) && (state == State:MM)) ||
215 ((cache_entry.CacheState != State:S) && (state == State:S)) ||
216 ((cache_entry.CacheState != State:O) && (state == State:O)) ) {
217
218 cache_entry.CacheState := state;
219 }
220 else {
221 cache_entry.CacheState := state;
222 }
223 }
224 }
225
226 AccessPermission getAccessPermission(Addr addr) {
227 TBE tbe := TBEs[addr];
228 if(is_valid(tbe)) {
229 DPRINTF(RubySlicc, "%s,%s\n", tbe.TBEState, L1Cache_State_to_permission(tbe.TBEState));
230 return L1Cache_State_to_permission(tbe.TBEState);
231 }
232
233 Entry cache_entry := getCacheEntry(addr);
234 if(is_valid(cache_entry)) {
235 DPRINTF(RubySlicc, "%s,%s\n", cache_entry.CacheState, L1Cache_State_to_permission(cache_entry.CacheState));
236 return L1Cache_State_to_permission(cache_entry.CacheState);
237 }
238
239 DPRINTF(RubySlicc, "AccessPermission_NotPresent\n");
240 return AccessPermission:NotPresent;
241 }
242
243 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
244 if (is_valid(cache_entry)) {
245 cache_entry.changePermission(L1Cache_State_to_permission(state));
246 }
247 }
248
249 void functionalRead(Addr addr, Packet *pkt) {
250 Entry cache_entry := getCacheEntry(addr);
251 if(is_valid(cache_entry)) {
252 testAndRead(addr, cache_entry.DataBlk, pkt);
253 } else {
254 TBE tbe := TBEs[addr];
255 if(is_valid(tbe)) {
256 testAndRead(addr, tbe.DataBlk, pkt);
257 } else {
258 error("Data block missing!");
259 }
260 }
261 }
262
263 int functionalWrite(Addr addr, Packet *pkt) {
264 int num_functional_writes := 0;
265
266 Entry cache_entry := getCacheEntry(addr);
267 if(is_valid(cache_entry)) {
268 num_functional_writes := num_functional_writes +
269 testAndWrite(addr, cache_entry.DataBlk, pkt);
270 return num_functional_writes;
271 }
272
273 TBE tbe := TBEs[addr];
274 if (is_valid(tbe)){
275 num_functional_writes := num_functional_writes +
276 testAndWrite(addr, tbe.DataBlk, pkt);
277 }
278 return num_functional_writes;
279 }
280
281 Event mandatory_request_type_to_event(RubyRequestType type) {
282 if (type == RubyRequestType:LD) {
283 return Event:Load;
284 } else if (type == RubyRequestType:IFETCH) {
285 return Event:Ifetch;
286 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
287 return Event:Store;
288 } else {
289 error("Invalid RubyRequestType");
290 }
291 }
292
293 // ** OUT_PORTS **
294
295 out_port(requestNetwork_out, RequestMsg, requestFromL1Cache);
296 out_port(responseNetwork_out, ResponseMsg, responseFromL1Cache);
297 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
298
299 // ** IN_PORTS **
300
301 // Use Timer
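// The use timer implements the lockout window for the M_W/MM_W states: after
// exclusive data arrives, the block is held for use_timeout_latency cycles so
// the local processor can complete its access; forwarded requests are recycled
// while in a _W state and only serviced once Use_Timeout fires.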
302 in_port(useTimerTable_in, Addr, useTimerTable, rank=4) {
303 if (useTimerTable_in.isReady(clockEdge())) {
304 Addr readyAddress := useTimerTable.nextAddress();
305 trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
306 TBEs.lookup(readyAddress));
307 }
308 }
309
310 // Trigger Queue
311 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
312 if (triggerQueue_in.isReady(clockEdge())) {
313 peek(triggerQueue_in, TriggerMsg) {
314 if (in_msg.Type == TriggerType:ALL_ACKS) {
315 trigger(Event:All_acks, in_msg.addr,
316 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
317 } else {
318 error("Unexpected message");
319 }
320 }
321 }
322 }
323
324 // Response Network
325 in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache, rank=2) {
326 if (responseToL1Cache_in.isReady(clockEdge())) {
327 peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
328 if (in_msg.Type == CoherenceResponseType:ACK) {
329 trigger(Event:Ack, in_msg.addr,
330 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
331 } else if (in_msg.Type == CoherenceResponseType:DATA) {
332 trigger(Event:Data, in_msg.addr,
333 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
334 } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
335 trigger(Event:Exclusive_Data, in_msg.addr,
336 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
337 } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
338 trigger(Event:Writeback_Ack, in_msg.addr,
339 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
340 } else if (in_msg.Type == CoherenceResponseType:WB_ACK_DATA) {
341 trigger(Event:Writeback_Ack_Data, in_msg.addr,
342 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
343 } else if (in_msg.Type == CoherenceResponseType:WB_NACK) {
344 trigger(Event:Writeback_Nack, in_msg.addr,
345 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
346 } else {
347 error("Unexpected message");
348 }
349 }
350 }
351 }
352
353
354 // Request Network
355 in_port(requestNetwork_in, RequestMsg, requestToL1Cache, rank=1) {
356 if (requestNetwork_in.isReady(clockEdge())) {
357 peek(requestNetwork_in, RequestMsg, block_on="addr") {
358 assert(in_msg.Destination.isElement(machineID));
359 DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
360
361 if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
362 if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
363 trigger(Event:Own_GETX, in_msg.addr,
364 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
365 } else {
366 trigger(Event:Fwd_GETX, in_msg.addr,
367 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
368 }
369 } else if (in_msg.Type == CoherenceRequestType:GETS) {
370 trigger(Event:Fwd_GETS, in_msg.addr,
371 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
372 } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
373 trigger(Event:Fwd_DMA, in_msg.addr,
374 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
375 } else if (in_msg.Type == CoherenceRequestType:INV) {
376 trigger(Event:Inv, in_msg.addr,
377 getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
378 } else {
379 error("Unexpected message");
380 }
381 }
382 }
383 }
384
385 // Mandatory queue between the node's CPU and its L1 caches
386 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, rank=0) {
387 if (mandatoryQueue_in.isReady(clockEdge())) {
388 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
389
390 // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
391
392 if (in_msg.Type == RubyRequestType:IFETCH) {
393 // ** INSTRUCTION ACCESS ***
394
395 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
396 if (is_valid(L1Icache_entry)) {
397 // The tag matches for the L1, so the L1 asks the L2 for it.
398 trigger(mandatory_request_type_to_event(in_msg.Type),
399 in_msg.LineAddress, L1Icache_entry,
400 TBEs[in_msg.LineAddress]);
401 } else {
402
403 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
404 // Check to see if it is in the OTHER L1
405 if (is_valid(L1Dcache_entry)) {
406 // The block is in the wrong L1; replace it there first (the ifetch will retry)
407 trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
408 TBEs[in_msg.LineAddress]);
409 }
410 if (L1Icache.cacheAvail(in_msg.LineAddress)) {
411 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
412 trigger(mandatory_request_type_to_event(in_msg.Type),
413 in_msg.LineAddress, L1Icache_entry,
414 TBEs[in_msg.LineAddress]);
415 } else {
416 // No room in the L1, so we need to make room in the L1
417 // Check if the line we want to evict is not locked
418 Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
419 check_on_cache_probe(mandatoryQueue_in, addr);
420 trigger(Event:L1_Replacement,
421 addr,
422 getL1ICacheEntry(addr),
423 TBEs[addr]);
424 }
425 }
426 } else {
427 // *** DATA ACCESS ***
428
429 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
430 if (is_valid(L1Dcache_entry)) {
431 // The tag matches for the L1, so the L1 asks the L2 for it
432 trigger(mandatory_request_type_to_event(in_msg.Type),
433 in_msg.LineAddress, L1Dcache_entry,
434 TBEs[in_msg.LineAddress]);
435 } else {
436
437 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
438 // Check to see if it is in the OTHER L1
439 if (is_valid(L1Icache_entry)) {
440 // The block is in the wrong L1; replace it there first (the data access will retry)
441 trigger(Event:L1_Replacement, in_msg.LineAddress,
442 L1Icache_entry, TBEs[in_msg.LineAddress]);
443 }
444 if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
445 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
446 trigger(mandatory_request_type_to_event(in_msg.Type),
447 in_msg.LineAddress, L1Dcache_entry,
448 TBEs[in_msg.LineAddress]);
449 } else {
450 // No room in the L1, so we need to make room in the L1
451 // Check if the line we want to evict is not locked
452 Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
453 check_on_cache_probe(mandatoryQueue_in, addr);
454 trigger(Event:L1_Replacement,
455 addr,
456 getL1DCacheEntry(addr),
457 TBEs[addr]);
458 }
459 }
460 }
461 }
462 }
463 }
464
465
466 // ACTIONS
467
468 action(a_issueGETS, "a", desc="Issue GETS") {
469 peek(mandatoryQueue_in, RubyRequest) {
470 enqueue(requestNetwork_out, RequestMsg, request_latency) {
471 out_msg.addr := address;
472 out_msg.Type := CoherenceRequestType:GETS;
473 out_msg.Requestor := machineID;
474 out_msg.RequestorMachine := MachineType:L1Cache;
475 out_msg.Destination.add(mapAddressToMachine(address,
476 MachineType:L2Cache));
477 out_msg.MessageSize := MessageSizeType:Request_Control;
478 out_msg.AccessMode := in_msg.AccessMode;
479 out_msg.Prefetch := in_msg.Prefetch;
480 }
481 }
482 }
483
484 action(b_issueGETX, "b", desc="Issue GETX") {
485 peek(mandatoryQueue_in, RubyRequest) {
486 enqueue(requestNetwork_out, RequestMsg, request_latency) {
487 out_msg.addr := address;
488 out_msg.Type := CoherenceRequestType:GETX;
489 out_msg.Requestor := machineID;
490 out_msg.RequestorMachine := MachineType:L1Cache;
491 out_msg.Destination.add(mapAddressToMachine(address,
492 MachineType:L2Cache));
493 out_msg.MessageSize := MessageSizeType:Request_Control;
494 out_msg.AccessMode := in_msg.AccessMode;
495 out_msg.Prefetch := in_msg.Prefetch;
496 }
497 }
498 }
499
500 action(d_issuePUTX, "d", desc="Issue PUTX") {
501 enqueue(requestNetwork_out, RequestMsg, request_latency) {
502 out_msg.addr := address;
503 out_msg.Type := CoherenceRequestType:PUTX;
504 out_msg.Requestor := machineID;
505 out_msg.RequestorMachine := MachineType:L1Cache;
506 out_msg.Destination.add(mapAddressToMachine(address,
507 MachineType:L2Cache));
508 out_msg.MessageSize := MessageSizeType:Writeback_Control;
509 }
510 }
511
512 action(dd_issuePUTO, "\d", desc="Issue PUTO") {
513 enqueue(requestNetwork_out, RequestMsg, request_latency) {
514 out_msg.addr := address;
515 out_msg.Type := CoherenceRequestType:PUTO;
516 out_msg.Requestor := machineID;
517 out_msg.RequestorMachine := MachineType:L1Cache;
518 out_msg.Destination.add(mapAddressToMachine(address,
519 MachineType:L2Cache));
520 out_msg.MessageSize := MessageSizeType:Writeback_Control;
521 }
522 }
523
524 action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
525 enqueue(requestNetwork_out, RequestMsg, request_latency) {
526 out_msg.addr := address;
527 out_msg.Type := CoherenceRequestType:PUTS;
528 out_msg.Requestor := machineID;
529 out_msg.RequestorMachine := MachineType:L1Cache;
530 out_msg.Destination.add(mapAddressToMachine(address,
531 MachineType:L2Cache));
532 out_msg.MessageSize := MessageSizeType:Writeback_Control;
533 }
534 }
535
536 action(e_sendData, "e", desc="Send data from cache to requestor") {
537 peek(requestNetwork_in, RequestMsg) {
538 assert(is_valid(cache_entry));
539 if (in_msg.RequestorMachine == MachineType:L2Cache) {
540 enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
541 out_msg.addr := address;
542 out_msg.Type := CoherenceResponseType:DATA;
543 out_msg.Sender := machineID;
544 out_msg.SenderMachine := MachineType:L1Cache;
545 out_msg.Destination.add(mapAddressToMachine(address,
546 MachineType:L2Cache));
547 out_msg.DataBlk := cache_entry.DataBlk;
548 // out_msg.Dirty := cache_entry.Dirty;
549 out_msg.Dirty := false;
550 out_msg.Acks := in_msg.Acks;
551 out_msg.MessageSize := MessageSizeType:Response_Data;
552 }
553 DPRINTF(RubySlicc, "Sending data to L2: %#x\n", in_msg.addr);
554 }
555 else {
556 enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
557 out_msg.addr := address;
558 out_msg.Type := CoherenceResponseType:DATA;
559 out_msg.Sender := machineID;
560 out_msg.SenderMachine := MachineType:L1Cache;
561 out_msg.Destination.add(in_msg.Requestor);
562 out_msg.DataBlk := cache_entry.DataBlk;
563 // out_msg.Dirty := cache_entry.Dirty;
564 out_msg.Dirty := false;
565 out_msg.Acks := in_msg.Acks;
566 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
567 }
568 DPRINTF(RubySlicc, "Sending data to L1\n");
569 }
570 }
571 }
572
573 action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
574 peek(requestNetwork_in, RequestMsg) {
575 assert(is_valid(cache_entry));
576 if (in_msg.RequestorMachine == MachineType:L2Cache) {
577 enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
578 out_msg.addr := address;
579 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
580 out_msg.Sender := machineID;
581 out_msg.SenderMachine := MachineType:L1Cache;
582 out_msg.Destination.add(mapAddressToMachine(address,
583 MachineType:L2Cache));
584 out_msg.DataBlk := cache_entry.DataBlk;
585 out_msg.Dirty := cache_entry.Dirty;
586 out_msg.Acks := in_msg.Acks;
587 out_msg.MessageSize := MessageSizeType:Response_Data;
588 }
589 DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
590 }
591 else {
592 enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
593 out_msg.addr := address;
594 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
595 out_msg.Sender := machineID;
596 out_msg.SenderMachine := MachineType:L1Cache;
597 out_msg.Destination.add(in_msg.Requestor);
598 out_msg.DataBlk := cache_entry.DataBlk;
599 out_msg.Dirty := cache_entry.Dirty;
600 out_msg.Acks := in_msg.Acks;
601 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
602 }
603 DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
604 }
605 }
606 }
607
608 action(f_sendAck, "f", desc="Send ack from cache to requestor") {
609 peek(requestNetwork_in, RequestMsg) {
610 if (in_msg.RequestorMachine == MachineType:L1Cache) {
611 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
612 out_msg.addr := address;
613 out_msg.Type := CoherenceResponseType:ACK;
614 out_msg.Sender := machineID;
615 out_msg.SenderMachine := MachineType:L1Cache;
616 out_msg.Destination.add(in_msg.Requestor);
617 out_msg.Acks := 0 - 1; // -1
618 out_msg.MessageSize := MessageSizeType:Response_Control;
619 }
620 }
621 else {
622 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
623 out_msg.addr := address;
624 out_msg.Type := CoherenceResponseType:ACK;
625 out_msg.Sender := machineID;
626 out_msg.SenderMachine := MachineType:L1Cache;
627 out_msg.Destination.add(mapAddressToMachine(address,
628 MachineType:L2Cache));
629 out_msg.Acks := 0 - 1; // -1
630 out_msg.MessageSize := MessageSizeType:Response_Control;
631 }
632 }
633 }
634 }
635
636 action(g_sendUnblock, "g", desc="Send unblock to memory") {
637 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
638 out_msg.addr := address;
639 out_msg.Type := CoherenceResponseType:UNBLOCK;
640 out_msg.Sender := machineID;
641 out_msg.SenderMachine := MachineType:L1Cache;
642 out_msg.Destination.add(mapAddressToMachine(address,
643 MachineType:L2Cache));
644 out_msg.MessageSize := MessageSizeType:Unblock_Control;
645 }
646 }
647
648 action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
649 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
650 out_msg.addr := address;
651 out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
652 out_msg.Sender := machineID;
653 out_msg.SenderMachine := MachineType:L1Cache;
654 out_msg.Destination.add(mapAddressToMachine(address,
655 MachineType:L2Cache));
656 out_msg.MessageSize := MessageSizeType:Unblock_Control;
657 }
658 }
659
660 action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
661 assert(is_valid(cache_entry));
662 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
663 L1Dcache.setMRU(cache_entry);
664 sequencer.readCallback(address, cache_entry.DataBlk);
665 }
666
667 action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
668 assert(is_valid(cache_entry));
669 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
670 L1Icache.setMRU(cache_entry);
671 sequencer.readCallback(address, cache_entry.DataBlk);
672 }
673
674 action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
675 assert(is_valid(cache_entry));
676 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
677 L1Icache.setMRU(address);
678 L1Dcache.setMRU(address);
679 sequencer.readCallback(address, cache_entry.DataBlk, true);
680 }
681
682 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
683 assert(is_valid(cache_entry));
684 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
685 L1Dcache.setMRU(cache_entry);
686 sequencer.writeCallback(address, cache_entry.DataBlk);
687 cache_entry.Dirty := true;
688 }
689
690 action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
691 assert(is_valid(cache_entry));
692 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
693 L1Icache.setMRU(address);
694 L1Dcache.setMRU(address);
695 sequencer.writeCallback(address, cache_entry.DataBlk, true);
696 cache_entry.Dirty := true;
697 }
698
699 action(i_allocateTBE, "i", desc="Allocate TBE") {
700 check_allocate(TBEs);
701 TBEs.allocate(address);
702 set_tbe(TBEs[address]);
703 assert(is_valid(cache_entry));
704 tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
705 tbe.Dirty := cache_entry.Dirty;
706 }
707
708 action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
709 triggerQueue_in.dequeue(clockEdge());
710 }
711
712 action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
713 useTimerTable.unset(address);
714 }
715
716 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
717 mandatoryQueue_in.dequeue(clockEdge());
718 }
719
720 action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
721 requestNetwork_in.dequeue(clockEdge());
722 }
723
724 action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
725 peek(responseToL1Cache_in, ResponseMsg) {
726 assert(is_valid(tbe));
727 DPRINTF(RubySlicc, "L1 decrementNumberOfMessages: %d\n", in_msg.Acks);
728 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
729 }
730 }
731
732 action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
733 peek(requestNetwork_in, RequestMsg) {
734 assert(is_valid(tbe));
735 tbe.NumPendingMsgs := tbe.NumPendingMsgs - in_msg.Acks;
736 }
737 }
738
739 action(n_popResponseQueue, "n", desc="Pop response queue") {
740 responseToL1Cache_in.dequeue(clockEdge());
741 }
742
743 action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
744 assert(is_valid(tbe));
745 if (tbe.NumPendingMsgs == 0) {
746 enqueue(triggerQueue_out, TriggerMsg) {
747 out_msg.addr := address;
748 out_msg.Type := TriggerType:ALL_ACKS;
749 }
750 }
751 }
752
753 action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
754 useTimerTable.set(address,
755 clockEdge() + cyclesToTicks(use_timeout_latency));
756 }
757
758 action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
759 peek(requestNetwork_in, RequestMsg) {
760 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
761 out_msg.addr := address;
762 out_msg.Type := CoherenceResponseType:DMA_ACK;
763 out_msg.Sender := machineID;
764 out_msg.SenderMachine := MachineType:L1Cache;
765 out_msg.Destination.add(mapAddressToMachine(address,
766 MachineType:L2Cache));
767 out_msg.Dirty := false;
768 out_msg.Acks := 1;
769 out_msg.MessageSize := MessageSizeType:Response_Control;
770 }
771 }
772 }
773
774 action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
775 peek(requestNetwork_in, RequestMsg) {
776 assert(is_valid(tbe));
777 if (in_msg.RequestorMachine == MachineType:L1Cache ||
778 in_msg.RequestorMachine == MachineType:DMA) {
779 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
780 out_msg.addr := address;
781 out_msg.Type := CoherenceResponseType:DATA;
782 out_msg.Sender := machineID;
783 out_msg.SenderMachine := MachineType:L1Cache;
784 out_msg.Destination.add(in_msg.Requestor);
785 out_msg.DataBlk := tbe.DataBlk;
786 // out_msg.Dirty := tbe.Dirty;
787 out_msg.Dirty := false;
788 out_msg.Acks := in_msg.Acks;
789 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
790 }
791 }
792 else {
793 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
794 out_msg.addr := address;
795 out_msg.Type := CoherenceResponseType:DATA;
796 out_msg.Sender := machineID;
797 out_msg.SenderMachine := MachineType:L1Cache;
798 out_msg.Destination.add(mapAddressToMachine(address,
799 MachineType:L2Cache));
800 out_msg.DataBlk := tbe.DataBlk;
801 // out_msg.Dirty := tbe.Dirty;
802 out_msg.Dirty := false;
803 out_msg.Acks := in_msg.Acks;
804 out_msg.MessageSize := MessageSizeType:Response_Data;
805 }
806 }
807 }
808 }
809
810 action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send exclusive data from TBE to cache") {
811 peek(requestNetwork_in, RequestMsg) {
812 assert(is_valid(tbe));
813 if (in_msg.RequestorMachine == MachineType:L1Cache) {
814 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
815 out_msg.addr := address;
816 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
817 out_msg.Sender := machineID;
818 out_msg.SenderMachine := MachineType:L1Cache;
819 out_msg.Destination.add(in_msg.Requestor);
820 out_msg.DataBlk := tbe.DataBlk;
821 out_msg.Dirty := tbe.Dirty;
822 out_msg.Acks := in_msg.Acks;
823 out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
824 }
825 }
826 else {
827 enqueue(responseNetwork_out, ResponseMsg, response_latency) {
828 out_msg.addr := address;
829 out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
830 out_msg.Sender := machineID;
831 out_msg.SenderMachine := MachineType:L1Cache;
832 out_msg.Destination.add(mapAddressToMachine(address,
833 MachineType:L2Cache));
834 out_msg.DataBlk := tbe.DataBlk;
835 out_msg.Dirty := tbe.Dirty;
836 out_msg.Acks := in_msg.Acks;
837 out_msg.MessageSize := MessageSizeType:Response_Data;
838 }
839 }
840 }
841 }
842
843 // L2 will usually request data for a writeback
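// (a Writeback_Ack_Data response); on a plain Writeback_Ack the data is not
// needed and only an unblock message is sent (see the SI/OI/MI transitions below).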
844 action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
845 enqueue(requestNetwork_out, RequestMsg, request_latency) {
846 assert(is_valid(tbe));
847 out_msg.addr := address;
848 out_msg.Requestor := machineID;
849 out_msg.RequestorMachine := MachineType:L1Cache;
850 out_msg.Destination.add(mapAddressToMachine(address,
851 MachineType:L2Cache));
852 if (tbe.Dirty) {
853 out_msg.Type := CoherenceRequestType:WRITEBACK_DIRTY_DATA;
854 } else {
855 out_msg.Type := CoherenceRequestType:WRITEBACK_CLEAN_DATA;
856 }
857 out_msg.DataBlk := tbe.DataBlk;
858 out_msg.MessageSize := MessageSizeType:Writeback_Data;
859 }
860 }
861
862 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
863 TBEs.deallocate(address);
864 unset_tbe();
865 }
866
867 action(u_writeDataToCache, "u", desc="Write data to cache") {
868 peek(responseToL1Cache_in, ResponseMsg) {
869 assert(is_valid(cache_entry));
870 cache_entry.DataBlk := in_msg.DataBlk;
871 cache_entry.Dirty := in_msg.Dirty;
872
873 if (in_msg.Type == CoherenceResponseType:DATA) {
874 //assert(in_msg.Dirty == false);
875 }
876 }
877 }
878
879 action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
880 if (L1Dcache.isTagPresent(address)) {
881 L1Dcache.deallocate(address);
882 } else {
883 L1Icache.deallocate(address);
884 }
885 unset_cache_entry();
886 }
887
888 action(ii_allocateL1DCacheBlock, "\i", desc="Allocate an L1 D-cache entry for this address.") {
889 if ((is_invalid(cache_entry))) {
890 set_cache_entry(L1Dcache.allocate(address, new Entry));
891 }
892 }
893
894 action(jj_allocateL1ICacheBlock, "\j", desc="Allocate an L1 I-cache entry for this address.") {
895 if ((is_invalid(cache_entry))) {
896 set_cache_entry(L1Icache.allocate(address, new Entry));
897 }
898 }
899
900 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
901 if (send_evictions) {
902 DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
903 sequencer.evictionCallback(address);
904 }
905 }
906
907 action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
908 ++L1Icache.demand_misses;
909 }
910
911 action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
912 ++L1Icache.demand_hits;
913 }
914
915 action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
916 ++L1Dcache.demand_misses;
917 }
918
919 action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
920 ++L1Dcache.demand_hits;
921 }
922
923 action(z_recycleRequestQueue, "z", desc="Send the head of the forwarded request queue to the back of the queue.") {
924 requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
925 }
926
927 action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
928 mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
929 }
930
931 //*****************************************************
932 // TRANSITIONS
933 //*****************************************************
934
935 // Transitions for Load/Store/L1_Replacement from transient states
936 transition({IM, SM, OM, IS, OI, SI, MI, II}, {Store, L1_Replacement}) {
937 zz_recycleMandatoryQueue;
938 }
939
940 transition({M_W, MM_W}, L1_Replacement) {
941 zz_recycleMandatoryQueue;
942 }
943
944 transition({M_W, MM_W}, {Fwd_GETS, Fwd_DMA, Fwd_GETX, Own_GETX, Inv}) {
945 z_recycleRequestQueue;
946 }
947
948 transition({IM, IS, OI, MI, SI, II}, {Load, Ifetch}) {
949 zz_recycleMandatoryQueue;
950 }
951
952 // Transitions from Idle
953 transition(I, Load, IS) {
954 ii_allocateL1DCacheBlock;
955 i_allocateTBE;
956 a_issueGETS;
957 uu_profileDataMiss;
958 k_popMandatoryQueue;
959 }
960
961 transition(I, Ifetch, IS) {
962 jj_allocateL1ICacheBlock;
963 i_allocateTBE;
964 a_issueGETS;
965 uu_profileInstMiss;
966 k_popMandatoryQueue;
967 }
968
969 transition(I, Store, IM) {
970 ii_allocateL1DCacheBlock;
971 i_allocateTBE;
972 b_issueGETX;
973 uu_profileDataMiss;
974 k_popMandatoryQueue;
975 }
976
977 transition(I, L1_Replacement) {
978 kk_deallocateL1CacheBlock;
979 }
980
981 transition(I, Inv) {
982 f_sendAck;
983 l_popForwardQueue;
984 }
985
986 transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
987 h_load_hit;
988 uu_profileDataHit;
989 k_popMandatoryQueue;
990 }
991
992 transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
993 h_ifetch_hit;
994 uu_profileInstHit;
995 k_popMandatoryQueue;
996 }
997
998 // Transitions from Shared
999 transition(S, Store, SM) {
1000 i_allocateTBE;
1001 b_issueGETX;
1002 uu_profileDataMiss;
1003 k_popMandatoryQueue;
1004 }
1005
1006 transition(S, L1_Replacement, SI) {
1007 i_allocateTBE;
1008 dd_issuePUTS;
1009 forward_eviction_to_cpu;
1010 kk_deallocateL1CacheBlock;
1011 }
1012
1013 transition(S, Inv, I) {
1014 f_sendAck;
1015 forward_eviction_to_cpu;
1016 l_popForwardQueue;
1017 }
1018
1019 transition(S, Fwd_GETS) {
1020 e_sendData;
1021 l_popForwardQueue;
1022 }
1023
1024 transition(S, Fwd_DMA) {
1025 e_sendData;
1026 ub_dmaUnblockL2Cache;
1027 l_popForwardQueue;
1028 }
1029
1030 // Transitions from Owned
1031 transition(O, Store, OM) {
1032 i_allocateTBE;
1033 b_issueGETX;
1034 uu_profileDataMiss;
1035 k_popMandatoryQueue;
1036 }
1037
1038 transition(O, L1_Replacement, OI) {
1039 i_allocateTBE;
1040 dd_issuePUTO;
1041 forward_eviction_to_cpu;
1042 kk_deallocateL1CacheBlock;
1043 }
1044
1045 transition(O, Fwd_GETX, I) {
1046 ee_sendDataExclusive;
1047 forward_eviction_to_cpu;
1048 l_popForwardQueue;
1049 }
1050
1051 transition(O, Fwd_GETS) {
1052 e_sendData;
1053 l_popForwardQueue;
1054 }
1055
1056 transition(O, Fwd_DMA) {
1057 e_sendData;
1058 ub_dmaUnblockL2Cache;
1059 l_popForwardQueue;
1060 }
1061
1062 // Transitions from MM
1063 transition({MM, MM_W}, Store) {
1064 hh_store_hit;
1065 uu_profileDataHit;
1066 k_popMandatoryQueue;
1067 }
1068
1069 transition(MM, L1_Replacement, MI) {
1070 i_allocateTBE;
1071 d_issuePUTX;
1072 forward_eviction_to_cpu;
1073 kk_deallocateL1CacheBlock;
1074 }
1075
1076 transition(MM, Fwd_GETX, I) {
1077 ee_sendDataExclusive;
1078 forward_eviction_to_cpu;
1079 l_popForwardQueue;
1080 }
1081
1082 transition(MM, Fwd_GETS, I) {
1083 ee_sendDataExclusive;
1084 forward_eviction_to_cpu;
1085 l_popForwardQueue;
1086 }
1087
1088 transition(MM, Fwd_DMA, MM) {
1089 e_sendData;
1090 ub_dmaUnblockL2Cache;
1091 l_popForwardQueue;
1092 }
1093
1094 // Transitions from M
1095 transition(M, Store, MM) {
1096 hh_store_hit;
1097 uu_profileDataHit;
1098 k_popMandatoryQueue;
1099 }
1100
1101 transition(M_W, Store, MM_W) {
1102 hh_store_hit;
1103 uu_profileDataHit;
1104 k_popMandatoryQueue;
1105 }
1106
1107 transition(M, L1_Replacement, MI) {
1108 i_allocateTBE;
1109 d_issuePUTX;
1110 forward_eviction_to_cpu;
1111 kk_deallocateL1CacheBlock;
1112 }
1113
1114 transition(M, Fwd_GETX, I) {
1115 // e_sendData;
1116 ee_sendDataExclusive;
1117 forward_eviction_to_cpu;
1118 l_popForwardQueue;
1119 }
1120
1121 transition(M, Fwd_GETS, O) {
1122 e_sendData;
1123 l_popForwardQueue;
1124 }
1125
1126 transition(M, Fwd_DMA) {
1127 e_sendData;
1128 ub_dmaUnblockL2Cache;
1129 l_popForwardQueue;
1130 }
1131
1132 // Transitions from IM
1133
1134 transition(IM, Inv) {
1135 f_sendAck;
1136 l_popForwardQueue;
1137 }
1138
1139 transition(IM, Ack) {
1140 m_decrementNumberOfMessages;
1141 o_checkForCompletion;
1142 n_popResponseQueue;
1143 }
1144
1145 transition(IM, {Exclusive_Data, Data}, OM) {
1146 u_writeDataToCache;
1147 m_decrementNumberOfMessages;
1148 o_checkForCompletion;
1149 n_popResponseQueue;
1150 }
1151
1152 // Transitions from SM
1153 transition(SM, Inv, IM) {
1154 f_sendAck;
1155 forward_eviction_to_cpu;
1156 l_popForwardQueue;
1157 }
1158
1159 transition(SM, Ack) {
1160 m_decrementNumberOfMessages;
1161 o_checkForCompletion;
1162 n_popResponseQueue;
1163 }
1164
1165 transition(SM, {Data, Exclusive_Data}, OM) {
1166 // v_writeDataToCacheVerify;
1167 m_decrementNumberOfMessages;
1168 o_checkForCompletion;
1169 n_popResponseQueue;
1170 }
1171
1172 transition(SM, Fwd_GETS) {
1173 e_sendData;
1174 l_popForwardQueue;
1175 }
1176
1177 transition(SM, Fwd_DMA) {
1178 e_sendData;
1179 ub_dmaUnblockL2Cache;
1180 l_popForwardQueue;
1181 }
1182
1183 // Transitions from OM
1184 transition(OM, Own_GETX) {
1185 mm_decrementNumberOfMessages;
1186 o_checkForCompletion;
1187 l_popForwardQueue;
1188 }
1189
1190
1191 // transition(OM, Fwd_GETX, OMF) {
1192 transition(OM, Fwd_GETX, IM) {
1193 ee_sendDataExclusive;
1194 l_popForwardQueue;
1195 }
1196
1197 transition(OM, Fwd_GETS) {
1198 e_sendData;
1199 l_popForwardQueue;
1200 }
1201
1202 transition(OM, Fwd_DMA) {
1203 e_sendData;
1204 ub_dmaUnblockL2Cache;
1205 l_popForwardQueue;
1206 }
1207
1208 //transition({OM, OMF}, Ack) {
1209 transition(OM, Ack) {
1210 m_decrementNumberOfMessages;
1211 o_checkForCompletion;
1212 n_popResponseQueue;
1213 }
1214
1215 transition(OM, All_acks, MM_W) {
1216 xx_store_hit;
1217 gg_sendUnblockExclusive;
1218 s_deallocateTBE;
1219 o_scheduleUseTimeout;
1220 j_popTriggerQueue;
1221 }
1222
1223 transition(MM_W, Use_Timeout, MM) {
1224 jj_unsetUseTimer;
1225 }
1226
1227 // Transitions from IS
1228
1229 transition(IS, Inv) {
1230 f_sendAck;
1231 l_popForwardQueue;
1232 }
1233
1234 transition(IS, Data, S) {
1235 u_writeDataToCache;
1236 m_decrementNumberOfMessages;
1237 hx_load_hit;
1238 g_sendUnblock;
1239 s_deallocateTBE;
1240 n_popResponseQueue;
1241 }
1242
1243 transition(IS, Exclusive_Data, M_W) {
1244 u_writeDataToCache;
1245 m_decrementNumberOfMessages;
1246 hx_load_hit;
1247 gg_sendUnblockExclusive;
1248 o_scheduleUseTimeout;
1249 s_deallocateTBE;
1250 n_popResponseQueue;
1251 }
1252
1253 transition(M_W, Use_Timeout, M) {
1254 jj_unsetUseTimer;
1255 }
1256
1257 // Transitions from OI/MI
1258
1259 transition(MI, Fwd_GETS, OI) {
1260 q_sendDataFromTBEToCache;
1261 l_popForwardQueue;
1262 }
1263
1264 transition(MI, Fwd_DMA) {
1265 q_sendDataFromTBEToCache;
1266 ub_dmaUnblockL2Cache;
1267 l_popForwardQueue;
1268 }
1269
1270 transition(MI, Fwd_GETX, II) {
1271 q_sendExclusiveDataFromTBEToCache;
1272 l_popForwardQueue;
1273 }
1274
1275 transition({SI, OI}, Fwd_GETS) {
1276 q_sendDataFromTBEToCache;
1277 l_popForwardQueue;
1278 }
1279
1280 transition({SI, OI}, Fwd_DMA) {
1281 q_sendDataFromTBEToCache;
1282 ub_dmaUnblockL2Cache;
1283 l_popForwardQueue;
1284 }
1285
1286 transition(OI, Fwd_GETX, II) {
1287 q_sendExclusiveDataFromTBEToCache;
1288 l_popForwardQueue;
1289 }
1290
1291 transition({SI, OI, MI}, Writeback_Ack_Data, I) {
1292 qq_sendWBDataFromTBEToL2; // always send data
1293 s_deallocateTBE;
1294 n_popResponseQueue;
1295 }
1296
1297 transition({SI, OI, MI}, Writeback_Ack, I) {
1298 g_sendUnblock;
1299 s_deallocateTBE;
1300 n_popResponseQueue;
1301 }
1302
1303 transition({MI, OI}, Writeback_Nack, OI) {
1304 // FIXME: This might cause deadlock by re-using the writeback
1305 // channel, we should handle this case differently.
1306 dd_issuePUTO;
1307 n_popResponseQueue;
1308 }
1309
1310 // Transitions from II
1311 transition(II, {Writeback_Ack, Writeback_Ack_Data}, I) {
1312 g_sendUnblock;
1313 s_deallocateTBE;
1314 n_popResponseQueue;
1315 }
1316
1317 // transition({II, SI}, Writeback_Nack, I) {
1318 transition(II, Writeback_Nack, I) {
1319 s_deallocateTBE;
1320 n_popResponseQueue;
1321 }
1322
1323 transition(SI, Writeback_Nack) {
1324 dd_issuePUTS;
1325 n_popResponseQueue;
1326 }
1327
1328 transition(II, Inv) {
1329 f_sendAck;
1330 l_popForwardQueue;
1331 }
1332
1333 transition(SI, Inv, II) {
1334 f_sendAck;
1335 l_popForwardQueue;
1336 }
1337 }