2 * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
5 * For use for simulation and test purposes only
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
// L3 cache controller for the AMD MOESI_AMD_Base protocol.
// Sits between the CPU-side L2s and the directory/memory ("NB" = northbridge).
36 machine(MachineType:L3Cache, "L3")
// Point-to-point wire buffers to/from the directory on the same die.
37 : CacheMemory * L3cache;
38 WireBuffer * reqToDir;
39 WireBuffer * respToDir;
40 WireBuffer * l3UnblockToDir;
// NOTE(review): a reqToL3 WireBuffer is referenced by the request in_port
// below but its declaration is not visible here — confirm it exists upstream.
42 WireBuffer * probeToL3;
43 WireBuffer * respToL3;
// Latencies (in cycles) charged when enqueuing outgoing requests/responses.
44 Cycles l3_request_latency := 1;
45 Cycles l3_response_latency := 35;
47 // To the general response network
48 MessageBuffer * responseFromL3, network="To", virtual_network="2", ordered="false", vnet_type="response";
50 // From the general response network
51 MessageBuffer * responseToL3, network="From", virtual_network="2", ordered="false", vnet_type="response";
// Events that drive the L3 state machine; grouped by message source.
55 enumeration(Event, desc="L3 Events") {
56 // Requests coming from the Cores
57 RdBlk, desc="CPU RdBlk event";
58 RdBlkM, desc="CPU RdBlkM event";
59 RdBlkS, desc="CPU RdBlkS event";
60 CtoD, desc="Change to Dirty request";
61 WrVicBlk, desc="L2 Victim (dirty)";
62 WrVicBlkShared, desc="L2 Victim (dirty)";
63 ClVicBlk, desc="L2 Victim (clean)";
64 ClVicBlkShared, desc="L2 Victim (clean)";
// Writeback data messages following a victim request.
66 CPUData, desc="WB data from CPU";
67 CPUDataShared, desc="WB data from CPU, NBReqShared 1";
68 StaleWB, desc="WB stale; no data";
// Internally generated when a victim request needs a way and must evict.
70 L3_Repl, desc="L3 Replacement";
// Probes arriving from the directory.
73 PrbInvData, desc="Invalidating probe, return dirty data";
74 PrbInv, desc="Invalidating probe, no need to return data";
75 PrbShrData, desc="Downgrading probe, return data";
77 // Coming from Memory Controller
78 WBAck, desc="ack from memory";
80 CancelWB, desc="Cancel WB from L2";
// MOESI stable states plus transient states. Transient names follow the
// pattern X_Y: currently in (or came from) X, heading to Y once the
// expected writeback data arrives.
85 state_declaration(State, desc="L3 State", default="L3Cache_State_I") {
86 M, AccessPermission:Read_Write, desc="Modified"; // No other cache has copy, memory stale
87 O, AccessPermission:Read_Only, desc="Owned"; // Correct most recent copy, others may exist in S
88 E, AccessPermission:Read_Write, desc="Exclusive"; // Correct, most recent, and only copy (and == Memory)
89 S, AccessPermission:Read_Only, desc="Shared"; // Correct, most recent. If no one in O, then == Memory
90 I, AccessPermission:Invalid, desc="Invalid";
// Waiting for CPU writeback data after acking a victim request.
92 I_M, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
93 I_O, AccessPermission:Busy, desc="Invalid, received WrVicBlk, sent Ack, waiting for Data";
94 I_E, AccessPermission:Busy, desc="Invalid, receive ClVicBlk, sent Ack, waiting for Data";
95 I_S, AccessPermission:Busy, desc="Invalid, receive ClVicBlk, sent Ack, waiting for Data";
96 S_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to M";
97 S_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
98 S_E, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to E";
99 S_S, AccessPermission:Busy, desc="Shared, received ClVicBlk, sent Ack, waiting for Data, then go to S";
100 E_M, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to O";
101 E_O, AccessPermission:Busy, desc="received WrVicBlkShared, sent Ack, waiting for Data, then go to O";
102 E_E, AccessPermission:Busy, desc="received WrVicBlk, sent Ack, waiting for Data, then go to O";
103 E_S, AccessPermission:Busy, desc="Shared, received WrVicBlk, sent Ack, waiting for Data";
104 O_M, AccessPermission:Busy, desc="...";
105 O_O, AccessPermission:Busy, desc="...";
106 O_E, AccessPermission:Busy, desc="...";
107 O_S, AccessPermission:Busy, desc="...";
108 M_M, AccessPermission:Busy, desc="...";
109 M_O, AccessPermission:Busy, desc="...";
110 M_E, AccessPermission:Busy, desc="...";
111 M_S, AccessPermission:Busy, desc="...";
// Writeback-in-flight / cancellation bookkeeping states.
112 D_I, AccessPermission:Invalid, desc="drop WB data on the floor when receive";
113 MOD_I, AccessPermission:Busy, desc="drop WB data on the floor, waiting for WBAck from Mem";
114 MO_I, AccessPermission:Busy, desc="M or O, received L3_Repl, waiting for WBAck from Mem";
115 I_I, AccessPermission:Busy, desc="I_MO received L3_Repl";
116 I_CD, AccessPermission:Busy, desc="I_I received WBAck, now just waiting for CPUData";
117 I_C, AccessPermission:Invalid, desc="sent cancel, just waiting to receive mem wb ack so nothing gets confused";
// Cache-array access types reported to recordStats by the transitions below.
// Fix: Tag* entries previously said "data array" (copy-paste error).
120 enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
121 DataArrayRead, desc="Read the data array";
122 DataArrayWrite, desc="Write the data array";
123 TagArrayRead, desc="Read the tag array";
124 TagArrayWrite, desc="Write the tag array";
// Per-line cache entry stored in L3cache.
129 structure(Entry, desc="...", interface="AbstractCacheEntry") {
130 State CacheState, desc="cache state";
131 bool Dirty, desc="Is the data dirty (diff from memory?)";
132 DataBlock DataBlk, desc="Data for the block";
// Transaction buffer entry: per-address transient bookkeeping while a
// writeback/eviction is in flight.
135 structure(TBE, desc="...") {
136 State TBEState, desc="Transient state";
137 DataBlock DataBlk, desc="data for the block";
138 bool Dirty, desc="Is the data dirty?";
139 bool Shared, desc="Victim hit by shared probe";
// machineID (self) means "not waiting on any L2"; see f_setFrom/rf_resetFrom.
140 MachineID From, desc="Waiting for writeback from...";
// External (C++-implemented) TBE table plus controller-provided helpers.
143 structure(TBETable, external="yes") {
146 void deallocate(Addr);
147 bool isPresent(Addr);
150 TBETable TBEs, template="<L3Cache_TBE>", constructor="m_number_of_TBEs";
152 void set_cache_entry(AbstractCacheEntry b);
153 void unset_cache_entry();
156 void wakeUpAllBuffers();
157 void wakeUpBuffers(Addr a);
158 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
160 // FUNCTION DEFINITIONS
162 Tick cyclesToTicks(Cycles c);
// Look up the L3 entry for addr; returns an invalid pointer on miss.
164 Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
165 return static_cast(Entry, "pointer", L3cache.lookup(addr));
// Return the data block for addr; assumes the entry is present.
168 DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
169 return getCacheEntry(addr).DataBlk;
// True if addr already has a tag, or its set has a free way (no eviction needed).
172 bool presentOrAvail(Addr addr) {
173 return L3cache.isTagPresent(addr) || L3cache.cacheAvail(addr);
// State accessors: a valid TBE's transient state takes priority over the
// cache entry's stable state; setState mirrors state into both.
176 State getState(TBE tbe, Entry cache_entry, Addr addr) {
179 } else if (is_valid(cache_entry)) {
180 return cache_entry.CacheState;
185 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
187 tbe.TBEState := state;
190 if (is_valid(cache_entry)) {
191 cache_entry.CacheState := state;
// Functional (debug/checkpoint) access path: prefer TBE data when a
// transaction is in flight, otherwise fall through to memory.
195 void functionalRead(Addr addr, Packet *pkt) {
196 TBE tbe := TBEs.lookup(addr);
198 testAndRead(addr, tbe.DataBlk, pkt);
200 functionalMemoryRead(pkt);
// Returns the number of locations actually written (TBE copy + memory).
204 int functionalWrite(Addr addr, Packet *pkt) {
205 int num_functional_writes := 0;
207 TBE tbe := TBEs.lookup(addr);
209 num_functional_writes := num_functional_writes +
210 testAndWrite(addr, tbe.DataBlk, pkt);
213 num_functional_writes := num_functional_writes +
214 functionalMemoryWrite(pkt);
215 return num_functional_writes;
// Map the current (transient-first) state to a Ruby access permission.
218 AccessPermission getAccessPermission(Addr addr) {
219 TBE tbe := TBEs.lookup(addr);
221 return L3Cache_State_to_permission(tbe.TBEState);
224 Entry cache_entry := getCacheEntry(addr);
225 if(is_valid(cache_entry)) {
226 return L3Cache_State_to_permission(cache_entry.CacheState);
229 return AccessPermission:NotPresent;
232 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
233 if (is_valid(cache_entry)) {
234 cache_entry.changePermission(L3Cache_State_to_permission(state));
// Stats hooks; bodies not visible here (presumably forward to L3cache).
238 void recordRequestType(RequestType request_type, Addr addr) {
242 bool checkResourceAvailable(RequestType request_type, Addr addr) {
// Outgoing ports: requests and responses to the directory, data responses
// back to the cores, and unblocks to the directory.
248 out_port(requestNetwork_out, CPURequestMsg, reqToDir);
249 out_port(L3Resp_out, ResponseMsg, respToDir);
250 out_port(responseNetwork_out, ResponseMsg, responseFromL3);
251 out_port(unblockNetwork_out, UnblockMsg, l3UnblockToDir);
// Responses from the directory/memory side (NB = northbridge).
// Only memory writeback acks are legal here; anything else is a protocol error.
254 in_port(NBResponse_in, ResponseMsg, respToL3) {
255 if (NBResponse_in.isReady(clockEdge())) {
256 peek(NBResponse_in, ResponseMsg) {
257 Entry cache_entry := getCacheEntry(in_msg.addr);
258 TBE tbe := TBEs.lookup(in_msg.addr);
259 if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
260 trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
262 DPRINTF(RubySlicc, "%s\n", in_msg);
263 error("Error on NBResponse Type");
// Writeback data (and stale-WB notifications) arriving from the cores.
// NbReqShared distinguishes data that must land in a shared (O/S) state.
270 in_port(responseNetwork_in, ResponseMsg, responseToL3) {
271 if (responseNetwork_in.isReady(clockEdge())) {
272 peek(responseNetwork_in, ResponseMsg) {
273 Entry cache_entry := getCacheEntry(in_msg.addr);
274 TBE tbe := TBEs.lookup(in_msg.addr);
275 if (in_msg.Type == CoherenceResponseType:CPUData) {
276 if (in_msg.NbReqShared) {
277 trigger(Event:CPUDataShared, in_msg.addr, cache_entry, tbe);
279 trigger(Event:CPUData, in_msg.addr, cache_entry, tbe);
281 } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
282 trigger(Event:StaleWB, in_msg.addr, cache_entry, tbe);
284 DPRINTF(RubySlicc, "%s\n", in_msg);
// NOTE(review): message says "NBResponse" but this port carries CPU
// responses — looks like a copy-paste from NBResponse_in; confirm upstream.
285 error("Error on NBResponse Type");
// Probes from the directory: invalidations (with/without data return) and
// downgrades. A downgrade without ReturnData is unexpected for the L3.
292 in_port(probeNetwork_in, NBProbeRequestMsg, probeToL3) {
293 if (probeNetwork_in.isReady(clockEdge())) {
294 peek(probeNetwork_in, NBProbeRequestMsg) {
295 Entry cache_entry := getCacheEntry(in_msg.addr);
296 TBE tbe := TBEs.lookup(in_msg.addr);
297 if (in_msg.Type == ProbeRequestType:PrbInv) {
298 if (in_msg.ReturnData) {
299 trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
301 trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
303 } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
304 if (in_msg.ReturnData) {
305 trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
307 error("Don't think I should get any of these");
// CPU-side requests. Reads are triggered directly; victim writebacks first
// check for a free way (presentOrAvail) and otherwise trigger L3_Repl on
// the victim chosen by cacheProbe. WrCancel only fires if the canceling
// requestor is the one we are waiting on (tbe.From); otherwise recycle.
315 in_port(requestNetwork_in, CPURequestMsg, reqToL3) {
316 if (requestNetwork_in.isReady(clockEdge())) {
317 peek(requestNetwork_in, CPURequestMsg) {
318 assert(in_msg.Destination.isElement(machineID));
319 Entry cache_entry := getCacheEntry(in_msg.addr);
320 TBE tbe := TBEs.lookup(in_msg.addr);
321 if (in_msg.Type == CoherenceRequestType:RdBlk) {
322 trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
323 } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
324 trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
325 } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
326 trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
327 } else if (in_msg.Type == CoherenceRequestType:VicClean) {
328 if (presentOrAvail(in_msg.addr)) {
330 trigger(Event:ClVicBlkShared, in_msg.addr, cache_entry, tbe);
332 trigger(Event:ClVicBlk, in_msg.addr, cache_entry, tbe);
// No way available: evict a victim line first.
335 Addr victim := L3cache.cacheProbe(in_msg.addr);
336 trigger(Event:L3_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
338 } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
339 if (presentOrAvail(in_msg.addr)) {
341 trigger(Event:WrVicBlkShared, in_msg.addr, cache_entry, tbe);
343 trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
346 Addr victim := L3cache.cacheProbe(in_msg.addr);
347 trigger(Event:L3_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
349 } else if (in_msg.Type == CoherenceRequestType:WrCancel) {
350 if (is_valid(tbe) && tbe.From == in_msg.Requestor) {
351 trigger(Event:CancelWB, in_msg.addr, cache_entry, tbe);
353 requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
// Deallocate the block from the L3 tag/data arrays (no-op if not cached).
362 action(i_invL3, "i", desc="invalidate L3 cache block") {
363 if (is_valid(cache_entry)) {
364 L3cache.deallocate(address);
// Send cached data to the requesting core, granting Modified ownership.
369 action(rm_sendResponseM, "rm", desc="send Modified response") {
370 peek(requestNetwork_in, CPURequestMsg) {
371 enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
372 out_msg.addr := address;
373 out_msg.Type := CoherenceResponseType:NBSysResp;
374 out_msg.Sender := machineID;
375 out_msg.Destination.add(in_msg.Requestor);
376 out_msg.DataBlk := cache_entry.DataBlk;
377 out_msg.MessageSize := MessageSizeType:Response_Data;
378 out_msg.Dirty := cache_entry.Dirty;
379 out_msg.State := CoherenceState:Modified;
380 DPRINTF(RubySlicc, "%s\n", out_msg);
// Same as rm_ but the requester only gets a Shared copy.
385 action(rs_sendResponseS, "rs", desc="send Shared response") {
386 peek(requestNetwork_in, CPURequestMsg) {
387 enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
388 out_msg.addr := address;
389 out_msg.Type := CoherenceResponseType:NBSysResp;
390 out_msg.Sender := machineID;
391 out_msg.Destination.add(in_msg.Requestor);
392 out_msg.DataBlk := cache_entry.DataBlk;
393 out_msg.MessageSize := MessageSizeType:Response_Data;
394 out_msg.Dirty := cache_entry.Dirty;
395 out_msg.State := CoherenceState:Shared;
396 DPRINTF(RubySlicc, "%s\n", out_msg);
// Forward an L3 miss to the directory, preserving the original requestor so
// the response can bypass us.
402 action(r_requestToMem, "r", desc="Miss in L3, pass on") {
403 peek(requestNetwork_in, CPURequestMsg) {
404 enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
405 out_msg.addr := address;
406 out_msg.Type := in_msg.Type;
407 out_msg.Requestor := in_msg.Requestor;
408 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
409 out_msg.Shared := false; // unneeded for this request
410 out_msg.MessageSize := in_msg.MessageSize;
411 DPRINTF(RubySlicc, "%s\n", out_msg);
// Allocate a TBE, seeding it from the cache entry for writebacks.
// From := machineID is the "not waiting on an L2" sentinel.
416 action(t_allocateTBE, "t", desc="allocate TBE Entry") {
417 TBEs.allocate(address);
418 set_tbe(TBEs.lookup(address));
419 if (is_valid(cache_entry)) {
420 tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
421 tbe.Dirty := cache_entry.Dirty;
423 tbe.From := machineID;
426 action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
427 TBEs.deallocate(address);
// Announce our own dirty-victim writeback to the directory.
431 action(vd_vicDirty, "vd", desc="Victimize dirty L3 data") {
432 enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
433 out_msg.addr := address;
434 out_msg.Type := CoherenceRequestType:VicDirty;
435 out_msg.Requestor := machineID;
436 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
437 out_msg.MessageSize := MessageSizeType:Request_Control;
// Ack an L2 victim request; the L2 will follow up with CPUData.
441 action(w_sendResponseWBAck, "w", desc="send WB Ack") {
442 peek(requestNetwork_in, CPURequestMsg) {
443 enqueue(responseNetwork_out, ResponseMsg, l3_response_latency) {
444 out_msg.addr := address;
445 out_msg.Type := CoherenceResponseType:NBSysWBAck;
446 out_msg.Destination.add(in_msg.Requestor);
447 out_msg.Sender := machineID;
448 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Probe responses. All use CPUPrbResp since the directory treats L3 and CPU
// probe replies uniformly. Flag semantics: Hit = line present, Ntsl = "not
// there, sorry, lost it", Dirty = data differs from memory.
// pi: line (being) invalidated, no data returned.
453 action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
454 enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
455 out_msg.addr := address;
456 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
457 out_msg.Sender := machineID;
458 // will this always be ok? probably not for multisocket
459 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
460 out_msg.Dirty := false;
461 out_msg.Hit := false;
462 out_msg.Ntsl := true;
463 out_msg.State := CoherenceState:NA;
464 out_msg.MessageSize := MessageSizeType:Response_Control;
// ph: clean hit, line retained (downgrade), no data needed.
468 action(ph_sendProbeResponseHit, "ph", desc="send probe ack, no data") {
469 enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
470 out_msg.addr := address;
471 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
472 out_msg.Sender := machineID;
473 // will this always be ok? probably not for multisocket
474 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
475 out_msg.Dirty := false;
477 out_msg.Ntsl := false;
478 out_msg.State := CoherenceState:NA;
479 out_msg.MessageSize := MessageSizeType:Response_Control;
// pm: line not present at all.
483 action(pm_sendProbeResponseMiss, "pm", desc="send probe ack, no data") {
484 enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
485 out_msg.addr := address;
486 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
487 out_msg.Sender := machineID;
488 // will this always be ok? probably not for multisocket
489 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
490 out_msg.Dirty := false;
491 out_msg.Hit := false;
492 out_msg.Ntsl := false;
493 out_msg.State := CoherenceState:NA;
494 out_msg.MessageSize := MessageSizeType:Response_Control;
// pd: return dirty data from the cache entry (asserts Dirty).
498 action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
499 enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
500 out_msg.addr := address;
501 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
502 out_msg.Sender := machineID;
503 // will this always be ok? probably not for multisocket
504 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
505 out_msg.DataBlk := cache_entry.DataBlk;
506 assert(cache_entry.Dirty);
507 out_msg.Dirty := true;
509 out_msg.State := CoherenceState:NA;
510 out_msg.MessageSize := MessageSizeType:Response_Data;
// pdt: same as pd but data comes from the TBE (line already evicted).
514 action(pdt_sendProbeResponseDataFromTBE, "pdt", desc="send probe ack with data") {
515 enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
516 out_msg.addr := address;
517 out_msg.Type := CoherenceResponseType:CPUPrbResp;
518 out_msg.Sender := machineID;
519 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
520 out_msg.DataBlk := tbe.DataBlk;
522 out_msg.Dirty := true;
524 out_msg.MessageSize := MessageSizeType:Response_Data;
525 out_msg.State := CoherenceState:NA;
526 DPRINTF(RubySlicc, "%s\n", out_msg);
// Tell the directory to drop our in-flight memory writeback (probe beat us).
530 action(mc_cancelMemWriteback, "mc", desc="send writeback cancel to memory") {
531 enqueue(requestNetwork_out, CPURequestMsg, l3_request_latency) {
532 out_msg.addr := address;
533 out_msg.Type := CoherenceRequestType:WrCancel;
534 out_msg.Requestor := machineID;
535 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
536 out_msg.MessageSize := MessageSizeType:Request_Control;
// Allocate a fresh cache entry if none exists (caller ensured a free way).
540 action(a_allocateBlock, "a", desc="allocate L3 block") {
541 if (is_invalid(cache_entry)) {
542 set_cache_entry(L3cache.allocate(address, new Entry));
// Install CPU writeback data (and its dirty bit) into the cache entry.
546 action(d_writeData, "d", desc="write data to L3") {
547 peek(responseNetwork_in, ResponseMsg) {
549 cache_entry.Dirty := in_msg.Dirty;
551 cache_entry.DataBlk := in_msg.DataBlk;
552 DPRINTF(RubySlicc, "Writing to L3: %s\n", in_msg);
// Copy data carried on the victim request itself (always marked dirty).
556 action(rd_copyDataFromRequest, "rd", desc="write data to L3") {
557 peek(requestNetwork_in, CPURequestMsg) {
558 cache_entry.DataBlk := in_msg.DataBlk;
559 cache_entry.Dirty := true;
// Record which L2 we expect the writeback data from (guards CancelWB).
563 action(f_setFrom, "f", desc="set who WB is expected to come from") {
564 peek(requestNetwork_in, CPURequestMsg) {
565 tbe.From := in_msg.Requestor;
// Reset to the "not waiting" sentinel (our own machineID).
569 action(rf_resetFrom, "rf", desc="reset From") {
570 tbe.From := machineID;
// Write the victimized line (held in the TBE) back to the directory/memory.
573 action(wb_data, "wb", desc="write back data") {
574 enqueue(L3Resp_out, ResponseMsg, l3_request_latency) {
575 out_msg.addr := address;
576 out_msg.Type := CoherenceResponseType:CPUData;
577 out_msg.Sender := machineID;
578 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
579 out_msg.DataBlk := tbe.DataBlk;
580 out_msg.Dirty := tbe.Dirty;
582 out_msg.NbReqShared := true;
584 out_msg.NbReqShared := false;
586 out_msg.State := CoherenceState:Shared; // faux info
587 out_msg.MessageSize := MessageSizeType:Writeback_Data;
588 DPRINTF(RubySlicc, "%s\n", out_msg);
// Stash incoming WB data in the TBE (line itself already evicted).
592 action(wt_writeDataToTBE, "wt", desc="write WB data to TBE") {
593 peek(responseNetwork_in, ResponseMsg) {
594 tbe.DataBlk := in_msg.DataBlk;
595 tbe.Dirty := in_msg.Dirty;
// Unblock the directory after completing a transaction.
599 action(uu_sendUnblock, "uu", desc="state changed, unblock") {
600 enqueue(unblockNetwork_out, UnblockMsg, l3_request_latency) {
601 out_msg.addr := address;
602 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
603 out_msg.MessageSize := MessageSizeType:Unblock_Control;
604 DPRINTF(RubySlicc, "%s\n", out_msg);
608 action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
609 L3cache.setMRU(address);
// Queue-management actions: dequeue each in_port, or recycle a request we
// cannot service yet.
612 action(p_popRequestQueue, "p", desc="pop request queue") {
613 requestNetwork_in.dequeue(clockEdge());
616 action(pr_popResponseQueue, "pr", desc="pop response queue") {
617 responseNetwork_in.dequeue(clockEdge());
620 action(pn_popNBResponseQueue, "pn", desc="pop NB response queue") {
621 NBResponse_in.dequeue(clockEdge());
624 action(pp_popProbeQueue, "pp", desc="pop probe queue") {
625 probeNetwork_in.dequeue(clockEdge());
628 action(zz_recycleRequestQueue, "\z", desc="recycle request queue") {
629 requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
637 // transitions from base
// Read requests. Hits serve data directly (rm/rs); M/E downgrade their
// state when sharing; O/S cannot satisfy RdBlkM/CtoD and forward to the
// directory. NOTE(review): action bodies appear truncated in this view.
639 transition({I, I_C}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {TagArrayRead} {
644 transition(O, RdBlk ) {TagArrayRead, DataArrayRead} {
649 transition(M, RdBlk, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
655 transition(S, RdBlk) {TagArrayRead, DataArrayRead} {
660 transition(E, RdBlk, S) {TagArrayRead, DataArrayRead, TagArrayWrite} {
666 transition({M, O}, RdBlkS, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
672 transition({E, S}, RdBlkS, S) {TagArrayRead, DataArrayRead, TagArrayWrite} {
678 transition(M, RdBlkM, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
684 transition({O, S}, {RdBlkM, CtoD}) {TagArrayRead} {
685 r_requestToMem; // can't handle this, just forward
689 transition(E, RdBlkM, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
// L2 victim writebacks: ack the victim (w_) and move to an X_Y transient
// state awaiting CPUData. "Shared" variants land in O/S instead of M/E.
695 transition({I}, WrVicBlk, I_M) {TagArrayRead, TagArrayWrite} {
699 // rd_copyDataFromRequest;
// I_C is waiting on a memory ack; stall new victims until it resolves.
704 transition(I_C, {WrVicBlk, WrVicBlkShared, ClVicBlk, ClVicBlkShared}) {} {
705 zz_recycleRequestQueue;
708 transition({I}, WrVicBlkShared, I_O) {TagArrayRead, TagArrayWrite} {
712 // rd_copyDataFromRequest;
717 transition(S, WrVicBlkShared, S_O) {TagArrayRead, TagArrayWrite} {
718 // rd_copyDataFromRequest;
725 transition(S, WrVicBlk, S_M) {TagArrayRead, TagArrayWrite} { // should be technically not possible, but assume the data comes back with shared bit flipped
726 // rd_copyDataFromRequest;
733 transition(E, WrVicBlk, E_M) {TagArrayRead, TagArrayWrite} {
740 transition(E, WrVicBlkShared, E_O) {TagArrayRead, TagArrayWrite} {
747 transition(O, WrVicBlk, O_M) {TagArrayRead, TagArrayWrite} {
754 transition(O, WrVicBlkShared, O_O) {TagArrayRead, TagArrayWrite} {
761 transition(M, WrVicBlk, M_M) {TagArrayRead, TagArrayWrite} {
768 transition(M, WrVicBlkShared, M_O) {TagArrayRead, TagArrayWrite} {
// Clean victims: same pattern, heading to E/S-flavored targets.
775 transition({I}, ClVicBlk, I_E) {TagArrayRead, TagArrayWrite} {
783 transition({I}, ClVicBlkShared, I_S) {TagArrayRead, TagArrayWrite} {
791 transition(S, ClVicBlk, S_E) {TagArrayRead, TagArrayWrite} { // technically impossible, assume data comes back with shared bit flipped
798 transition(S, ClVicBlkShared, S_S) {TagArrayRead, TagArrayWrite} {
805 transition(E, ClVicBlk, E_E) {TagArrayRead, TagArrayWrite} {
812 transition(E, ClVicBlkShared, E_S) {TagArrayRead, TagArrayWrite} {
819 transition(O, ClVicBlk, O_E) {TagArrayRead, TagArrayWrite} { // technically impossible, but assume data comes back with shared bit flipped
826 transition(O, ClVicBlkShared, O_S) {TagArrayRead, TagArrayWrite} {
833 transition(M, ClVicBlk, M_E) {TagArrayRead, TagArrayWrite} {
840 transition(M, ClVicBlkShared, M_S) {TagArrayRead, TagArrayWrite} {
// MO_I is mid-eviction: forward reads, absorb colliding victims into MOD_I.
847 transition({MO_I}, {RdBlk, RdBlkS, RdBlkM, CtoD}) {} {
852 transition(MO_I, {WrVicBlkShared, WrVicBlk, ClVicBlk, ClVicBlkShared}, MOD_I) {TagArrayWrite} {
// CPUData arrival completes the X_Y transient states: install data and
// settle into the target stable state (Shared data forces O/S).
858 transition(I_M, CPUData, M) {DataArrayWrite, TagArrayWrite} {
865 transition(I_M, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
872 transition(I_O, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
879 transition(I_E, CPUData, E) {DataArrayWrite, TagArrayWrite} {
886 transition(I_E, CPUDataShared, S) {DataArrayWrite, TagArrayWrite} {
893 transition(I_S, {CPUData, CPUDataShared}, S) {DataArrayWrite, TagArrayWrite} {
900 transition(S_M, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
904 ut_updateTag; // update tag on writeback hits.
908 transition(S_O, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
912 ut_updateTag; // update tag on writeback hits.
916 transition(S_E, CPUDataShared, S) {DataArrayWrite, TagArrayWrite} {
920 ut_updateTag; // update tag on writeback hits.
924 transition(S_S, {CPUData, CPUDataShared}, S) {DataArrayWrite, TagArrayWrite} {
928 ut_updateTag; // update tag on writeback hits.
932 transition(O_E, CPUDataShared, O) {DataArrayWrite, TagArrayWrite} {
936 ut_updateTag; // update tag on writeback hits.
940 transition(O_S, {CPUData, CPUDataShared}, O) {DataArrayWrite, TagArrayWrite} {
944 ut_updateTag; // update tag on writeback hits.
// Eviction/cancel races: data is dropped (D_I) or forwarded to memory.
948 transition({D_I}, {CPUData, CPUDataShared}, I) {TagArrayWrite} {
954 transition(MOD_I, {CPUData, CPUDataShared}, MO_I) {TagArrayWrite} {
960 transition(I_I, {CPUData, CPUDataShared}, MO_I) {TagArrayWrite, DataArrayRead} {
967 transition(I_CD, {CPUData, CPUDataShared}, I) {DataArrayRead, TagArrayWrite} {
// Replacement: dirty-ish lines (M/O) write back via MO_I; clean lines (E/S)
// drop straight to I. Transient states cannot be evicted — recycle instead.
// NOTE(review): stray trailing comma in "{E, S,}" — harmless if SLICC
// tolerates it, but worth cleaning upstream.
975 transition({M, O}, L3_Repl, MO_I) {TagArrayRead, TagArrayWrite} {
981 transition({E, S,}, L3_Repl, I) {TagArrayRead, TagArrayWrite} {
985 transition({I_M, I_O, S_M, S_O, E_M, E_O}, L3_Repl) {} {
986 zz_recycleRequestQueue;
989 transition({O_M, O_O, O_E, O_S, M_M, M_O, M_E, M_S}, L3_Repl) {} {
990 zz_recycleRequestQueue;
993 transition({I_E, I_S, S_E, S_S, E_E, E_S}, L3_Repl) {} {
994 zz_recycleRequestQueue;
// Probes against stable states: return data only when we hold it dirty
// (M/O); otherwise ack with inv/hit/miss as appropriate.
997 transition({M, O}, PrbInvData, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
998 pd_sendProbeResponseData;
1003 transition({E, S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
1004 pi_sendProbeResponseInv;
1005 i_invL3; // nothing will happen in I
1009 transition({M, O, E, S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
1010 pi_sendProbeResponseInv;
1011 i_invL3; // nothing will happen in I
1015 transition({M, O}, PrbShrData, O) {TagArrayRead, DataArrayRead, TagArrayWrite} {
1016 pd_sendProbeResponseData;
1020 transition({E, S}, PrbShrData, S) {TagArrayRead, TagArrayWrite} {
1021 ph_sendProbeResponseHit;
1025 transition(I, PrbShrData) {TagArrayRead} {
1026 pm_sendProbeResponseMiss;
// Probes racing an in-flight eviction (MO_I): serve data from the TBE and,
// on invalidation, cancel the pending memory writeback (-> I_C).
1030 transition(MO_I, PrbInvData, I_C) {TagArrayWrite, DataArrayRead} {
1031 pdt_sendProbeResponseDataFromTBE;
1032 mc_cancelMemWriteback;
1036 transition(MO_I, PrbInv, I_C) {TagArrayWrite} {
1037 pi_sendProbeResponseInv;
1038 mc_cancelMemWriteback;
1042 transition(MO_I, PrbShrData) {DataArrayRead} {
1043 pdt_sendProbeResponseDataFromTBE;
1047 transition(I_C, {PrbInvData, PrbInv}) {} {
1048 pi_sendProbeResponseInv;
1052 transition(I_C, PrbShrData) {} {
1053 pm_sendProbeResponseMiss;
// Memory writeback acks: advance the eviction pipeline (I_I -> I_CD,
// MOD_I -> D_I, MO_I -> I, I_C -> I).
1057 transition(I_I, {WBAck}, I_CD) {TagArrayWrite} {
1058 pn_popNBResponseQueue;
1061 transition(MOD_I, WBAck, D_I) {DataArrayRead} {
1063 pn_popNBResponseQueue;
1066 transition(MO_I, WBAck, I) {DataArrayRead, TagArrayWrite} {
1069 pn_popNBResponseQueue;
1072 transition(I_C, {WBAck}, I) {TagArrayWrite} {
1074 pn_popNBResponseQueue;
// CancelWB: the L2 we were waiting on (tbe.From, checked at trigger time)
// withdrew its writeback — fall back to the pre-victim stable state, or
// continue/cancel the eviction for the bookkeeping states.
1077 transition({I_M, I_O, I_E, I_S}, CancelWB, I) {TagArrayWrite} {
1084 transition({S_S, S_O, S_M, S_E}, CancelWB, S) {TagArrayWrite} {
1090 transition({E_M, E_O, E_E, E_S}, CancelWB, E) {TagArrayWrite} {
1096 transition({O_M, O_O, O_E, O_S}, CancelWB, O) {TagArrayWrite} {
1102 transition({M_M, M_O, M_E, M_S}, CancelWB, M) {TagArrayWrite} {
1108 transition(D_I, CancelWB, I) {TagArrayWrite} {
1114 transition(MOD_I, CancelWB, MO_I) {TagArrayWrite} {
1120 transition(I_I, CancelWB, I_C) {TagArrayWrite} {
1123 mc_cancelMemWriteback;
1127 transition(I_CD, CancelWB, I) {TagArrayWrite} {
1130 mc_cancelMemWriteback;