2 * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
5 * For use for simulation and test purposes only
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
machine(MachineType:Directory, "AMD_Base-like protocol")
    : DirectoryMemory * directory;              // backing store of per-block directory state
      CacheMemory * L3CacheMemory;              // shared L3 consulted before memory
      Cycles response_latency := 5;             // delay for responses enqueued to cores
      Cycles response_latency_regionDir := 1;   // delay for acks sent to the region directory
      Cycles l3_hit_latency := 30;              // self-trigger delay modelling an L3 hit
      bool useL3OnWT := "False";                // presumably: allocate write-through data in L3 — confirm in WT actions
      Cycles to_memory_controller_latency := 1; // delay on reads queued to the memory controller

      // Traffic arriving from the CPU cores: requests (vnet 0),
      // responses (vnet 2) and unblocks (vnet 4).
      MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request";
      MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response";
      MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock";

      // Traffic sent to the cores: probes (vnet 0) and responses (vnet 2).
      MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request";
      MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response";

      // Forced invalidate/downgrade requests from the region buffer.
      MessageBuffer * reqFromRegBuf, network="From", virtual_network="7", vnet_type="request";

      // To Region directory
      MessageBuffer * reqToRegDir, network="To", virtual_network="5", vnet_type="request";
      MessageBuffer * reqFromRegDir, network="From", virtual_network="5", vnet_type="request";
      MessageBuffer * unblockToRegDir, network="To", virtual_network="4", vnet_type="unblock";

      // Internal self-trigger queues plus the memory controller's response queue.
      MessageBuffer * triggerQueue;
      MessageBuffer * L3triggerQueue;
      MessageBuffer * responseFromMemory;
  // Directory transient/stable states. Naming convention: BS_*/BM_*/B_* track
  // the request class (Shared/Modified/other); *_PM waits on probes AND memory,
  // *_Pm waits on probes with memory data already in hand, *_M waits on memory
  // only. The *_BL / *_B suffixed states handle an L3 WB or a blocked response
  // that arrived while a private request was already in flight.
  state_declaration(State, desc="Directory states", default="Directory_State_U") {
    U, AccessPermission:Backing_Store,     desc="unblocked";
    BR, AccessPermission:Backing_Store,    desc="got CPU read request, blocked while sent to L3";
    BW, AccessPermission:Backing_Store,    desc="got CPU write request, blocked while sent to L3";
    BL, AccessPermission:Busy,             desc="got L3 WB request";
    // BL is Busy because it's possible for the data only to be in the network
    // in the WB, L3 has sent it and gone on with its business in possibly I

    BI, AccessPermission:Backing_Store,    desc="Blocked waiting for inv ack from core";
    BS_M, AccessPermission:Backing_Store,  desc="blocked waiting for memory";
    BM_M, AccessPermission:Backing_Store,  desc="blocked waiting for memory";
    B_M, AccessPermission:Backing_Store,   desc="blocked waiting for memory";
    BP, AccessPermission:Backing_Store,    desc="blocked waiting for probes, no need for memory";
    BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
    BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory";
    B_PM, AccessPermission:Backing_Store,  desc="blocked waiting for probes and Memory";
    BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
    BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
    B_Pm, AccessPermission:Backing_Store,  desc="blocked waiting for probes, already got memory";
    B, AccessPermission:Backing_Store,     desc="sent response, Blocked til ack";

    // These are needed for when a private requests was issued before an inv was received
    BS_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
    BM_Pm_BL, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory";
    B_Pm_BL, AccessPermission:Backing_Store,  desc="blocked waiting for probes, already got memory";
    BP_BL, AccessPermission:Backing_Store,    desc="blocked waiting for probes, no need for memory";

    BS_Pm_B, AccessPermission:Backing_Store,  desc="blocked waiting for probes, already got memory";
    BM_Pm_B, AccessPermission:Backing_Store,  desc="blocked waiting for probes, already got memory";
    B_Pm_B, AccessPermission:Backing_Store,   desc="blocked waiting for probes, already got memory";
    BP_B, AccessPermission:Backing_Store,     desc="blocked waiting for probes, no need for memory";
  // Directory events. The *P variants are private accesses that bypassed the
  // region directory (triggered from requestNetwork_in when in_msg.Private);
  // the non-P forms arrive forwarded via the region directory (regDir_in).
  // NOTE(review): several enumerators (e.g. RdBlk/RdBlkS/RdBlkM and their *P
  // forms, referenced by the in_ports below) are elided from this chunk.
  enumeration(Event, desc="Directory events") {
    WriteThrough, desc="WriteThrough Message";
    Atomic, desc="Atomic Message";

    VicDirtyP, desc="...";
    VicCleanP, desc="...";
    WriteThroughP, desc="WriteThrough Message";
    AtomicP, desc="Atomic Message";

    VicDirty, desc="...";
    VicClean, desc="...";
    CPUData, desc="WB data from CPU";
    StaleWB, desc="WB response for a no longer valid request";

    CPUPrbResp, desc="Probe Response Msg";
    LastCPUPrbResp, desc="Last Probe Response Msg";

    ProbeAcksComplete, desc="Probe Acks Complete";

    L3Hit, desc="Hit in L3 return data to core";

    MemData, desc="Fetched data from memory arrives";
    WBAck, desc="Writeback Ack from memory arrives";

    CoreUnblock, desc="Core received data, unblock";
    UnblockWriteThrough, desc="unblock, self triggered";

    StaleVicDirty, desc="Core invalidated before VicDirty processed";
    StaleVicDirtyP, desc="Core invalidated before VicDirty processed";

    // For region protocol
    CPUReq, desc="Generic CPU request";
    Inv, desc="Region dir needs a block invalidated";
    Downgrade, desc="Region dir needs a block downgraded";

    // For private accesses (bypassed reg-dir)
    CPUReadP, desc="Initial req from core, sent to L3";
    CPUWriteP, desc="Initial req from core, sent to L3";
152 enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
153 L3DataArrayRead, desc="Read the data array";
154 L3DataArrayWrite, desc="Write the data array";
155 L3TagArrayRead, desc="Read the data array";
156 L3TagArrayWrite, desc="Write the data array";
  // Permanent directory entry, one per block in the DirectoryMemory.
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState,    desc="Directory state";
    DataBlock DataBlk,       desc="data for the block";
    // Requestors whose in-flight VicDirty/VicClean must be dropped as stale
    // (checked by the regDir_in and requestNetwork_in ports).
    NetDest VicDirtyIgnore,  desc="VicDirty coming from whom to ignore";
  // L3 cache entry: data plus the machine that last supplied it (used to
  // fill out_msg.OriginalResponder in responses).
  structure(CacheEntry, desc="...", interface="AbstractCacheEntry") {
    DataBlock DataBlk,    desc="data for the block";
    MachineID LastSender, desc="Mach which this block came from";
  // Transaction Buffer Entry: per-address in-flight transaction state.
  structure(TBE, desc="...") {
    State TBEState,            desc="Transient state";
    DataBlock DataBlk,         desc="data for the block";
    DataBlock DataBlkAux,      desc="Auxiliary data for the block";
    bool Dirty,                desc="Is the data dirty?";
    int NumPendingAcks,        desc="num acks expected";
    MachineID OriginalRequestor, desc="Original Requestor";
    MachineID WTRequestor,     desc="WT Requestor";
    bool Cached,               desc="data hit in Cache";
    bool MemData,              desc="Got MemData?",default="false";
    bool wtData,               desc="Got write through data?",default="false";
    bool atomicData,           desc="Got Atomic op?",default="false";
    // Latency bookkeeping copied into outgoing responses for profiling.
    Cycles InitialRequestTime, desc="...";
    Cycles ForwardRequestTime, desc="...";
    Cycles ProbeRequestStartTime, desc="...";
    bool DemandRequest,        desc="for profiling";
    MachineID LastSender,      desc="Mach which this block came from";
    bool L3Hit, default="false", desc="Was this an L3 hit?";
    // Guards against firing ProbeAcksComplete twice (see responseNetwork_in).
    bool TriggeredAcksComplete, default="false", desc="True if already triggered acks complete";
    WriteMask writeMask,       desc="outstanding write through mask";
  // External TBE table type (implemented by the Ruby runtime).
  structure(TBETable, external="yes") {
    void deallocate(Addr);
    bool isPresent(Addr);

  // One TBE per outstanding transaction, bounded by m_number_of_TBEs.
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  // Prototypes supplied by the SLICC/Ruby runtime.
  Tick cyclesToTicks(Cycles c);

  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);

  MachineID mapAddressToMachine(Addr addr, MachineType mtype);
  // Return the directory entry for addr, allocating a fresh Entry on demand
  // when the lookup comes back invalid.
  Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));

    if (is_valid(dir_entry)) {
      //DPRINTF(RubySlicc, "Getting entry %s: %s\n", addr, dir_entry.DataBlk);

    // Miss in the directory: allocate a new entry for this address.
    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
  // Return the freshest data for addr: prefer TBE data when a transaction
  // has already captured memory data, otherwise the directory copy.
  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe) && tbe.MemData) {
      DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe);

    DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr));
    return getDirectoryEntry(addr).DataBlk;
  // State is tracked solely in the directory entry (not TBE/CacheEntry).
  State getState(TBE tbe, CacheEntry entry, Addr addr) {
    return getDirectoryEntry(addr).DirectoryState;
  // Address-only convenience accessor for the directory state.
  State getStateFromAddr(Addr addr) {
    return getDirectoryEntry(addr).DirectoryState;
  // Store the new state directly in the directory entry.
  void setState(TBE tbe, CacheEntry entry, Addr addr, State state) {
    getDirectoryEntry(addr).DirectoryState := state;
  // Map the directory state to an access permission for functional accesses.
  AccessPermission getAccessPermission(Addr addr) {
    // For this Directory, all permissions are just tracked in Directory, since
    // it's not possible to have something in TBE but not Dir, just keep track
    // of state all in one place.
    if(directory.isPresent(addr)) {
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);

    return AccessPermission:NotPresent;
  // Functional (debugger/syscall) read: try the TBE copy first, otherwise
  // fall through to the memory controller's functional read path.
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);

      testAndRead(addr, tbe.DataBlk, pkt);

      functionalMemoryRead(pkt);
  // Functional write: update the TBE copy (when present) and backing memory,
  // returning how many locations were written.
  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);

      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  // Push the permission implied by the new state into the directory entry.
  void setAccessPermission(CacheEntry entry, Addr addr, State state) {
    getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
  // Record L3 array accesses for stats; maps protocol RequestTypes onto the
  // CacheMemory's data/tag array request types.
  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:L3DataArrayRead) {
      L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:L3DataArrayWrite) {
      L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:L3TagArrayRead) {
      L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:L3TagArrayWrite) {
      L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
  // Bandwidth/port check before a transition may fire: asks the L3 whether
  // the relevant data/tag array has a slot for this access.
  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:L3DataArrayRead) {
      return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:L3DataArrayWrite) {
      return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:L3TagArrayRead) {
      return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:L3TagArrayWrite) {
      return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);

    error("Invalid RequestType type in checkResourceAvailable");
  // Outgoing ports: probes and responses to cores, requests/acks to the
  // region directory, plus the two internal self-trigger queues.
  out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore);
  out_port(responseNetwork_out, ResponseMsg, responseToCore);

  out_port(requestNetworkReg_out, CPURequestMsg, reqToRegDir);
  out_port(regAckNetwork_out, UnblockMsg, unblockToRegDir);

  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
  out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue);
  // Highest-priority in_port: internally generated triggers (probe-acks
  // complete, self-unblock after a write-through).
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=7) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == TriggerType:AcksComplete) {
          trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == TriggerType:UnblockWriteThrough) {
          trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe);

          error("Unknown trigger msg");
  // Delayed L3-hit notifications (enqueued with l3_hit_latency by the
  // memory-read actions below).
  in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=6) {
    if (L3TriggerQueue_in.isReady(clockEdge())) {
      peek(L3TriggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == TriggerType:L3Hit) {
          trigger(Event:L3Hit, in_msg.addr, entry, tbe);

          error("Unknown trigger msg");
  // Unblock messages from cores: a core acknowledging receipt of data.
  in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=5) {
    if (unblockNetwork_in.isReady(clockEdge())) {
      peek(unblockNetwork_in, UnblockMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        trigger(Event:CoreUnblock, in_msg.addr, entry, tbe);
  // Core response network
  in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=4) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        DPRINTF(RubySlicc, "core responses %s\n", in_msg);
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
          // Distinguish the final probe response so the all-acks event fires
          // exactly once (TriggeredAcksComplete guards re-triggering).
          if (is_valid(tbe) && tbe.NumPendingAcks == 1
              && tbe.TriggeredAcksComplete == false) {
            trigger(Event:LastCPUPrbResp, in_msg.addr, entry, tbe);

            trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe);

        } else if (in_msg.Type == CoherenceResponseType:CPUData) {
          trigger(Event:CPUData, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:StaleNotif) {
          trigger(Event:StaleWB, in_msg.addr, entry, tbe);

          error("Unexpected response type");
  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=3) {
    if (memQueue_in.isReady(clockEdge())) {
      peek(memQueue_in, MemoryMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:MemData, in_msg.addr, entry, tbe);
          DPRINTF(RubySlicc, "%s\n", in_msg);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them.

          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
  // Forced invalidations/downgrades requested by the region buffer.
  in_port(regBuf_in, CPURequestMsg, reqFromRegBuf, rank=2) {
    if (regBuf_in.isReady(clockEdge())) {
      peek(regBuf_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == CoherenceRequestType:ForceInv) {
          trigger(Event:Inv, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:ForceDowngrade) {
          trigger(Event:Downgrade, in_msg.addr, entry, tbe);

          error("Bad request from region buffer");
  // Requests forwarded by the region directory (the shared/non-private path).
  // Victim writebacks are dropped as stale when the requestor is listed in
  // the entry's VicDirtyIgnore set.
  in_port(regDir_in, CPURequestMsg, reqFromRegDir, rank=1) {
    if (regDir_in.isReady(clockEdge())) {
      peek(regDir_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Type == CoherenceRequestType:RdBlk) {
          trigger(Event:RdBlk, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
          trigger(Event:RdBlkS, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
          trigger(Event:RdBlkM, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:Atomic) {
          trigger(Event:Atomic, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
          trigger(Event:WriteThrough, in_msg.addr, entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
          if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
            DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr);
            trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);

            trigger(Event:VicDirty, in_msg.addr, entry, tbe);

        } else if (in_msg.Type == CoherenceRequestType:VicClean) {
          if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
            DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr);
            trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe);

            trigger(Event:VicClean, in_msg.addr, entry, tbe);

          error("Bad message type fwded from Region Dir");
  // Lowest-priority port: requests straight from the cores. Private requests
  // bypass the region directory and map to the *P events; everything else
  // becomes a generic CPUReq (to be forwarded to the region dir).
  in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr));
        if (in_msg.Private) {
          // Bypass the region dir
          if (in_msg.Type == CoherenceRequestType:RdBlk) {
            trigger(Event:RdBlkP, in_msg.addr, entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
            trigger(Event:RdBlkSP, in_msg.addr, entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
            trigger(Event:RdBlkMP, in_msg.addr, entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:Atomic) {
            trigger(Event:AtomicP, in_msg.addr, entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:WriteThrough) {
            trigger(Event:WriteThroughP, in_msg.addr, entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
            // Stale-victim filtering, same as the regDir_in path.
            if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
              DPRINTF(RubySlicc, "Dropping VicDirtyP for address %s\n", in_msg.addr);
              trigger(Event:StaleVicDirtyP, in_msg.addr, entry, tbe);

              DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr);
              trigger(Event:VicDirtyP, in_msg.addr, entry, tbe);

          } else if (in_msg.Type == CoherenceRequestType:VicClean) {
            if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) {
              DPRINTF(RubySlicc, "Dropping VicCleanP for address %s\n", in_msg.addr);
              trigger(Event:StaleVicDirtyP, in_msg.addr, entry, tbe);

              DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr);
              trigger(Event:VicCleanP, in_msg.addr, entry, tbe);

            error("Bad message type for private access");

          trigger(Event:CPUReq, in_msg.addr, entry, tbe);
  // Send a Shared-state data response to the original requestor recorded in
  // the TBE. Sender is reported as the L3 on an L3 hit, else this directory
  // (the surrounding if/else appears elided from this chunk).
  action(s_sendResponseS, "s", desc="send Shared response") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:NBSysResp;

        out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

        out_msg.Sender := machineID;

      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.Dirty := false;
      out_msg.State := CoherenceState:Shared;
      // Timing/profiling fields carried from the TBE.
      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
      out_msg.OriginalResponder := tbe.LastSender;
      out_msg.DemandRequest := tbe.DemandRequest;
      out_msg.L3Hit := tbe.L3Hit;
      DPRINTF(RubySlicc, "%s\n", out_msg);
  // Send data in Exclusive or Shared state to the TBE's original requestor;
  // the Shared/Exclusive choice is conditional (condition elided from this
  // chunk — presumably based on tbe.Cached, as in the bypass variant below).
  action(es_sendResponseES, "es", desc="send Exclusive or Shared response") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:NBSysResp;

        out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

        out_msg.Sender := machineID;

      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.Dirty := tbe.Dirty;

        out_msg.State := CoherenceState:Shared;

        out_msg.State := CoherenceState:Exclusive;

      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
      out_msg.OriginalResponder := tbe.LastSender;
      out_msg.DemandRequest := tbe.DemandRequest;
      out_msg.L3Hit := tbe.L3Hit;
      DPRINTF(RubySlicc, "%s\n", out_msg);
  // Send a Modified data response to the TBE's requestor. For write-through
  // and atomic transactions it also enqueues an UnblockWriteThrough trigger
  // so the directory unblocks itself without waiting for a core unblock.
  action(m_sendResponseM, "m", desc="send Modified response") {

      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:UnblockWriteThrough;

      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;

          out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

          out_msg.Sender := machineID;

        out_msg.Destination.add(tbe.OriginalRequestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := tbe.Dirty;
        out_msg.State := CoherenceState:Modified;
        out_msg.CtoD := false;
        out_msg.InitialRequestTime := tbe.InitialRequestTime;
        out_msg.ForwardRequestTime := tbe.ForwardRequestTime;
        out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
        out_msg.OriginalResponder := tbe.LastSender;
        out_msg.DemandRequest := tbe.DemandRequest;
        out_msg.L3Hit := tbe.L3Hit;
        if (tbe.atomicData) {
          // Atomics carry the WT requestor so the core can route the result.
          out_msg.WTRequestor := tbe.WTRequestor;

        DPRINTF(RubySlicc, "%s\n", out_msg);

      if (tbe.atomicData) {
        enqueue(triggerQueue_out, TriggerMsg, 1) {
          out_msg.addr := address;
          out_msg.Type := TriggerType:UnblockWriteThrough;
  // Bypass variant of the Shared response: the destination comes straight
  // from the peeked core request (requestNetwork_in) rather than the TBE.
  action(sb_sendResponseSBypass, "sb", desc="send Shared response") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;

          out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

          out_msg.Sender := machineID;

        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := false;
        out_msg.State := CoherenceState:Shared;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
        out_msg.OriginalResponder := tbe.LastSender;
        out_msg.DemandRequest := false;
        out_msg.L3Hit := tbe.L3Hit;
        DPRINTF(RubySlicc, "%s\n", out_msg);
  // Bypass variant of the Exclusive/Shared response: Shared when the block
  // is cached elsewhere or the request forces sharing, else Exclusive.
  action(esb_sendResponseESBypass, "esb", desc="send Exclusive or Shared response") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;

          out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

          out_msg.Sender := machineID;

        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := tbe.Dirty;
        if (tbe.Cached || in_msg.ForceShared) {
          out_msg.State := CoherenceState:Shared;

          out_msg.State := CoherenceState:Exclusive;

        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
        out_msg.OriginalResponder := tbe.LastSender;
        out_msg.DemandRequest := false;
        out_msg.L3Hit := tbe.L3Hit;
        DPRINTF(RubySlicc, "%s\n", out_msg);
  // Bypass response for write-through/atomic requests. WriteThrough gets a
  // control-only WB ack; Atomic gets a Modified data response carrying the
  // directory's data. Both paths self-unblock via UnblockWriteThrough.
  action(mbwt_sendResponseWriteThroughBypass, "mbwt", desc="send write through response") {
    peek(requestNetwork_in, CPURequestMsg) {
      if (in_msg.Type == CoherenceRequestType:WriteThrough) {
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:NBSysWBAck;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.WTRequestor := in_msg.WTRequestor;
          out_msg.Sender := machineID;
          out_msg.MessageSize := MessageSizeType:Writeback_Control;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := curCycle();
          out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
          out_msg.DemandRequest := false;

        // Only WriteThrough and Atomic may take this action.
        assert(in_msg.Type == CoherenceRequestType:Atomic);
        enqueue(responseNetwork_out, ResponseMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:NBSysResp;

            out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

            out_msg.Sender := machineID;

          out_msg.Destination.add(in_msg.Requestor);
          out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
          out_msg.MessageSize := MessageSizeType:Response_Data;
          out_msg.Dirty := in_msg.Dirty;
          out_msg.State := CoherenceState:Modified;
          out_msg.CtoD := false;
          out_msg.InitialRequestTime := in_msg.InitialRequestTime;
          out_msg.ForwardRequestTime := curCycle();
          out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
          out_msg.OriginalResponder := tbe.LastSender;
          out_msg.DemandRequest := false;
          out_msg.L3Hit := tbe.L3Hit;
          out_msg.WTRequestor := in_msg.WTRequestor;
          DPRINTF(RubySlicc, "%s\n", out_msg);

      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:UnblockWriteThrough;
  // Bypass variant of the Modified data response, destined to the peeked
  // requestor instead of the TBE's recorded one.
  action(mb_sendResponseMBypass, "mb", desc="send Modified response") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;

          out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0));

          out_msg.Sender := machineID;

        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
        out_msg.Dirty := tbe.Dirty;
        out_msg.State := CoherenceState:Modified;
        out_msg.CtoD := false;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
        out_msg.OriginalResponder := tbe.LastSender;
        out_msg.DemandRequest := false;
        out_msg.L3Hit := tbe.L3Hit;
        DPRINTF(RubySlicc, "%s\n", out_msg);
  // Change-to-Dirty ack: grants Modified ownership without sending data
  // (control-size message, CtoD := true), to the TBE's original requestor.
  action(c_sendResponseCtoD, "c", desc="send CtoD Ack") {
    enqueue(responseNetwork_out, ResponseMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:NBSysResp;
      out_msg.Sender := machineID;
      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.MessageSize := MessageSizeType:Response_Control;
      out_msg.Dirty := false;
      out_msg.State := CoherenceState:Modified;
      out_msg.CtoD := true;
      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      out_msg.ForwardRequestTime := curCycle();
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime;
      out_msg.DemandRequest := tbe.DemandRequest;
      DPRINTF(RubySlicc, "%s\n", out_msg);
  // Private-path (bypass) variant of the CtoD ack: destination taken from
  // the peeked core request instead of the TBE.
  action(cp_sendResponseCtoDP, "cp", desc="send CtoD Ack") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysResp;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.Dirty := false;
        out_msg.State := CoherenceState:Modified;
        out_msg.CtoD := true;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
        out_msg.DemandRequest := false;
        DPRINTF(RubySlicc, "%s\n", out_msg);
  // Ack a writeback that arrived via the region directory; timing fields are
  // forwarded from the incoming request.
  action(w_sendResponseWBAck, "w", desc="send WB Ack") {
    peek(regDir_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysWBAck;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := in_msg.ForwardRequestTime;
        out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
        out_msg.DemandRequest := false;
  // Ack a writeback on the private (region-dir-bypass) path; ForwardRequestTime
  // is stamped locally since no region dir was involved.
  action(wp_sendResponseWBAckP, "wp", desc="send WB Ack") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysWBAck;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ForwardRequestTime := curCycle();
        out_msg.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
        out_msg.DemandRequest := false;
  // Ack a cancelled writeback: reply to the Sender of the peeked core
  // response rather than to a requestor.
  action(wc_sendResponseWBAck, "wc", desc="send WB Ack for cancel") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:NBSysWBAck;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.Sender := machineID;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Tell the region directory this directory is ready, unless the request
  // explicitly waived the ack (NoAckNeeded). Uses the shorter
  // response_latency_regionDir since the region dir is close by.
  action(ra_ackRegionDir, "ra", desc="Ack region dir") {
    peek(regDir_in, CPURequestMsg) {
      if (in_msg.NoAckNeeded == false) {
        enqueue(responseNetwork_out, ResponseMsg, response_latency_regionDir) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DirReadyAck;
          out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
          out_msg.Sender := machineID;
          out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Fetch data for a region-dir-forwarded request: on an L3 tag hit, copy the
  // L3 data into the TBE, deallocate the L3 line, and schedule a delayed
  // L3Hit trigger (modelling l3_hit_latency); otherwise read from memory.
  // (The else arm separating the two paths is elided from this chunk.)
  action(l_queueMemRdReq, "lr", desc="Read data from memory") {
    peek(regDir_in, CPURequestMsg) {
      if (L3CacheMemory.isTagPresent(address)) {
        enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
          out_msg.addr := address;
          out_msg.Type := TriggerType:L3Hit;
          DPRINTF(RubySlicc, "%s\n", out_msg);

        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
        tbe.DataBlk := entry.DataBlk;
        tbe.LastSender := entry.LastSender;

        DPRINTF(RubySlicc, "L3 data is %s\n", entry.DataBlk);
        L3CacheMemory.deallocate(address);

        queueMemoryRead(machineID, address, to_memory_controller_latency);
  // Private-path twin of l_queueMemRdReq: identical L3-hit/memory-read logic,
  // but peeks the direct core request network instead of the region dir port.
  action(lrp_queueMemRdReqP, "lrp", desc="Read data from memory") {
    peek(requestNetwork_in, CPURequestMsg) {
      if (L3CacheMemory.isTagPresent(address)) {
        enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) {
          out_msg.addr := address;
          out_msg.Type := TriggerType:L3Hit;
          DPRINTF(RubySlicc, "%s\n", out_msg);

        CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
        tbe.DataBlk := entry.DataBlk;
        tbe.LastSender := entry.LastSender;

        DPRINTF(RubySlicc, "L3 data is %s\n", entry.DataBlk);
        L3CacheMemory.deallocate(address);

        queueMemoryRead(machineID, address, to_memory_controller_latency);
// Probe-invalidate (with data return) every core the region buffer listed
// as a sharer; the expected ack count is added to the TBE so completion can
// be detected later.
872 action(dcr_probeInvCoreData, "dcr", desc="probe inv cores, return data") {
873 peek(regBuf_in, CPURequestMsg) {
874 enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
875 out_msg.addr := address;
876 out_msg.Type := ProbeRequestType:PrbInv;
877 out_msg.ReturnData := true;
878 out_msg.MessageSize := MessageSizeType:Control;
879 out_msg.Destination := in_msg.Sharers;
880 tbe.NumPendingAcks := tbe.NumPendingAcks + in_msg.Sharers.count();
881 DPRINTF(RubySlicc, "%s\n", out_msg);
882 APPEND_TRANSITION_COMMENT(" dcr: Acks remaining: ");
883 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
884 tbe.ProbeRequestStartTime := curCycle();
// Probe-downgrade (with data return) every core the region buffer listed
// as a sharer; the expected ack count is added to the TBE so completion can
// be detected later. Mirrors dcr_probeInvCoreData but sends PrbDowngrade.
889 action(ddr_probeDownCoreData, "ddr", desc="probe inv cores, return data") {
890 peek(regBuf_in, CPURequestMsg) {
891 enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
892 out_msg.addr := address;
893 out_msg.Type := ProbeRequestType:PrbDowngrade;
894 out_msg.ReturnData := true;
895 out_msg.MessageSize := MessageSizeType:Control;
896 out_msg.Destination := in_msg.Sharers;
897 tbe.NumPendingAcks := tbe.NumPendingAcks + in_msg.Sharers.count();
898 DPRINTF(RubySlicc, "%s\n", out_msg);
// Fixed copy-paste defect: this trace label previously said " dcr: ",
// misattributing the downgrade probe to the dcr action in transition traces.
899 APPEND_TRANSITION_COMMENT(" ddr: Acks remaining: ");
900 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
901 tbe.ProbeRequestStartTime := curCycle();
// Broadcast a downgrade probe (with data return) to all core pairs, TCPs,
// and SQCs, excluding the requestor itself; accumulates the expected ack
// count in the TBE. The "-1" on CorePair accounts for the excluded requestor.
906 action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
907 peek(requestNetwork_in, CPURequestMsg) { // not the right network?
908 enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
909 out_msg.addr := address;
910 out_msg.Type := ProbeRequestType:PrbDowngrade;
911 out_msg.ReturnData := true;
912 out_msg.MessageSize := MessageSizeType:Control;
913 out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
914 tbe.NumPendingAcks := tbe.NumPendingAcks +machineCount(MachineType:CorePair) - 1;
915 out_msg.Destination.broadcast(MachineType:TCP);
916 tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:TCP);
917 out_msg.Destination.broadcast(MachineType:SQC);
918 tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:SQC);
919 out_msg.Destination.remove(in_msg.Requestor);
920 DPRINTF(RubySlicc, "%s\n", (out_msg));
921 APPEND_TRANSITION_COMMENT(" sc: Acks remaining: ");
922 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
923 tbe.ProbeRequestStartTime := curCycle();
// Broadcast an invalidation probe (no data return) to all core pairs, TCPs,
// and SQCs, excluding the requestor; same ack-accounting pattern as
// sc_probeShrCoreData but with PrbInv / ReturnData=false.
928 action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") {
929 peek(requestNetwork_in, CPURequestMsg) { // not the right network?
930 enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) {
931 out_msg.addr := address;
932 out_msg.Type := ProbeRequestType:PrbInv;
933 out_msg.ReturnData := false;
934 out_msg.MessageSize := MessageSizeType:Control;
935 out_msg.Destination.broadcast(MachineType:CorePair); // won't be realistic for multisocket
936 tbe.NumPendingAcks := tbe.NumPendingAcks +machineCount(MachineType:CorePair) - 1;
937 out_msg.Destination.broadcast(MachineType:TCP);
938 tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:TCP);
939 out_msg.Destination.broadcast(MachineType:SQC);
940 tbe.NumPendingAcks := tbe.NumPendingAcks + machineCount(MachineType:SQC);
941 out_msg.Destination.remove(in_msg.Requestor);
942 APPEND_TRANSITION_COMMENT(" ic: Acks remaining: ");
943 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
944 DPRINTF(RubySlicc, "%s\n", out_msg);
945 tbe.ProbeRequestStartTime := curCycle();
// Copy the data block from the incoming response message into the
// directory entry (functional backing store update).
950 action(d_writeDataToMemory, "d", desc="Write data to memory") {
951 peek(responseNetwork_in, ResponseMsg) {
952 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
953 DPRINTF(RubySlicc, "Writing Data: %s to address %s\n", in_msg.DataBlk,
// Allocate a TBE for a request arriving via the region directory and seed
// it from the message: write mask and WT requestor for WriteThrough/Atomic,
// the directory's current data (patched with partial WT data), plus
// requestor/timing bookkeeping. DemandRequest is taken from the message.
958 action(t_allocateTBE, "t", desc="allocate TBE Entry") {
959 check_allocate(TBEs);
960 peek(regDir_in, CPURequestMsg) {
961 TBEs.allocate(address);
962 set_tbe(TBEs.lookup(address));
963 if (in_msg.Type == CoherenceRequestType:WriteThrough) {
964 tbe.writeMask.clear();
965 tbe.writeMask.orMask(in_msg.writeMask);
967 tbe.WTRequestor := in_msg.WTRequestor;
968 tbe.LastSender := in_msg.Requestor;
970 if (in_msg.Type == CoherenceRequestType:Atomic) {
971 tbe.writeMask.clear();
972 tbe.writeMask.orMask(in_msg.writeMask);
973 tbe.atomicData := true;
974 tbe.WTRequestor := in_msg.WTRequestor;
975 tbe.LastSender := in_msg.Requestor;
977 tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
979 if (in_msg.Type == CoherenceRequestType:WriteThrough) {
980 tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
983 tbe.OriginalRequestor := in_msg.Requestor;
984 tbe.NumPendingAcks := 0;
985 tbe.Cached := in_msg.ForceShared;
986 tbe.InitialRequestTime := in_msg.InitialRequestTime;
987 tbe.ForwardRequestTime := curCycle();
988 tbe.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
989 tbe.DemandRequest := in_msg.DemandRequest;
// Private-request variant of t_allocateTBE: identical TBE seeding, but
// peeks the core request network and forces DemandRequest := false
// (private/bypass requests are never treated as demand requests).
993 action(tp_allocateTBEP, "tp", desc="allocate TBE Entry") {
994 check_allocate(TBEs);
995 peek(requestNetwork_in, CPURequestMsg) {
996 TBEs.allocate(address);
997 set_tbe(TBEs.lookup(address));
998 if (in_msg.Type == CoherenceRequestType:WriteThrough) {
999 tbe.writeMask.clear();
1000 tbe.writeMask.orMask(in_msg.writeMask);
1002 tbe.WTRequestor := in_msg.WTRequestor;
1003 tbe.LastSender := in_msg.Requestor;
1005 if (in_msg.Type == CoherenceRequestType:Atomic) {
1006 tbe.writeMask.clear();
1007 tbe.writeMask.orMask(in_msg.writeMask);
1008 tbe.atomicData := true;
1009 tbe.WTRequestor := in_msg.WTRequestor;
1010 tbe.LastSender := in_msg.Requestor;
1012 tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs
1014 if (in_msg.Type == CoherenceRequestType:WriteThrough) {
1015 tbe.DataBlk.copyPartial(in_msg.DataBlk,tbe.writeMask);
1018 tbe.OriginalRequestor := in_msg.Requestor;
1019 tbe.NumPendingAcks := 0;
1020 tbe.Cached := in_msg.ForceShared;
1021 tbe.InitialRequestTime := in_msg.InitialRequestTime;
1022 tbe.ForwardRequestTime := curCycle();
1023 tbe.ProbeRequestStartTime := in_msg.ProbeRequestStartTime;
1024 tbe.DemandRequest := false;
// Overwrite the TBE's pending-ack count with the count supplied by the
// region directory (it already knows how many probes are outstanding).
1028 action(sa_setAcks, "sa", desc="setAcks") {
1029 peek(regDir_in, CPURequestMsg) {
1030 tbe.NumPendingAcks := in_msg.Acks;
1031 APPEND_TRANSITION_COMMENT(" waiting for acks ");
1032 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
// Allocate a bare TBE for a region invalidation (no message fields to
// copy; only the ack counter is initialized).
1036 action(tr_allocateTBE, "tr", desc="allocate TBE Entry for Region inv") {
1037 check_allocate(TBEs);
1038 TBEs.allocate(address);
1039 set_tbe(TBEs.lookup(address));
1040 tbe.NumPendingAcks := 0;
// Release the TBE for this address once the transaction completes.
1043 action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
1044 TBEs.deallocate(address);
// Apply a private WriteThrough or Atomic directly to the directory entry,
// staging the merged/atomically-updated block in tbe.DataBlkAux first.
1048 action(wdp_writeBackDataPrivate, "wdp", desc="Write back data if needed") {
1049 peek(requestNetwork_in, CPURequestMsg) {
1050 if (in_msg.Type == CoherenceRequestType:WriteThrough) {
1051 tbe.DataBlkAux := getDirectoryEntry(address).DataBlk;
1052 tbe.DataBlkAux.copyPartial(in_msg.DataBlk,in_msg.writeMask);
1053 getDirectoryEntry(address).DataBlk := tbe.DataBlkAux;
1055 assert(in_msg.Type == CoherenceRequestType:Atomic);
1056 tbe.DataBlkAux.atomicPartial(getDirectoryEntry(address).DataBlk,in_msg.writeMask);
1057 getDirectoryEntry(address).DataBlk := tbe.DataBlkAux;
// Commit TBE data to the directory: merge the write mask for write-through,
// apply the atomic op for atomics, or write the whole block if dirty.
1062 action(wd_writeBackData, "wd", desc="Write back data if needed") {
1064 DataBlock tmp := getDirectoryEntry(address).DataBlk;
1065 tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
1067 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1068 } else if (tbe.atomicData) {
1069 tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask);
1070 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1071 } else if (tbe.Dirty == true) {
1072 APPEND_TRANSITION_COMMENT(" Wrote data back ");
1073 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
// On invalidation, write the TBE's block back to the directory only when
// it is dirty; otherwise just note in the trace that nothing was written.
1077 action(wdi_writeBackDataInv, "wdi", desc="Write back inv data if needed") {
1078 // Kind of opposite from above...?
1079 if (tbe.Dirty == true) {
1080 getDirectoryEntry(address).DataBlk := tbe.DataBlk;
1081 APPEND_TRANSITION_COMMENT("Writing dirty data to dir");
1082 DPRINTF(RubySlicc, "Data %s: %s\n", address, tbe.DataBlk);
1084 APPEND_TRANSITION_COMMENT("NOT!!! Writing dirty data to dir");
// TBE-less variant of wdi_writeBackDataInv: the dirty flag and data come
// straight from the incoming response message instead of a TBE.
1088 action(wdt_writeBackDataInvNoTBE, "wdt", desc="Write back inv data if needed no TBE") {
1089 // Kind of opposite from above...?
1090 peek(responseNetwork_in, ResponseMsg) {
1091 if (in_msg.Dirty == true) {
1092 getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
1093 APPEND_TRANSITION_COMMENT("Writing dirty data to dir");
1094 DPRINTF(RubySlicc, "Data %s: %s\n", address, in_msg.DataBlk);
1096 APPEND_TRANSITION_COMMENT("NOT!!! Writing dirty data to dir");
// On a memory response, load the directory's block into the TBE unless a
// probe already deposited dirty data there; mark that memory data arrived.
1101 action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") {
1102 peek(memQueue_in, MemoryMsg) {
1103 if (tbe.Dirty == false) {
1104 tbe.DataBlk := getDirectoryEntry(address).DataBlk;
1106 tbe.MemData := true;
// On an L3 hit, copy the L3 entry's data/last-sender into the TBE.
// Asserts no dirty probe data has landed first.
1110 action(ml_writeL3DataToTBE, "ml", desc="write L3 data to TBE") {
1111 assert(tbe.Dirty == false);
1112 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
1113 tbe.DataBlk := entry.DataBlk;
1114 tbe.LastSender := entry.LastSender;
1116 tbe.MemData := true;
// Merge probe-response data into the TBE. For WT/atomic requests the
// response is merged around the pending write mask; a second dirty
// response (double data) is only legal if it matches the first.
1119 action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") {
1120 peek(responseNetwork_in, ResponseMsg) {
1122 DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
1123 DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
1125 DataBlock tmp := in_msg.DataBlk;
1126 tmp.copyPartial(tbe.DataBlk,tbe.writeMask);
1128 } else if (tbe.Dirty) {
1129 if(tbe.atomicData == false && tbe.wtData == false) {
1130 DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
1131 assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
1134 tbe.DataBlk := in_msg.DataBlk;
1135 tbe.Dirty := in_msg.Dirty;
1136 tbe.LastSender := in_msg.Sender;
// Record CPU writeback data in the TBE; double data must match the
// already-recorded block (asserted).
1145 action(yc_writeCPUDataToTBE, "yc", desc="write CPU Data to TBE") {
1146 peek(responseNetwork_in, ResponseMsg) {
1148 DPRINTF(RubySlicc, "Got dirty data for %s from %s\n", address, in_msg.Sender);
1149 DPRINTF(RubySlicc, "Data is %s\n", in_msg.DataBlk);
1151 DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender);
1152 assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data
1154 tbe.DataBlk := in_msg.DataBlk;
1156 tbe.LastSender := in_msg.Sender;
// Consume one pending probe ack; tolerates (and flags in the trace) an
// ack arriving after the count already reached zero.
1161 action(x_decrementAcks, "x", desc="decrement Acks pending") {
1162 if (tbe.NumPendingAcks > 0) {
1163 tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
1165 APPEND_TRANSITION_COMMENT(" Double ack! ");
1167 assert(tbe.NumPendingAcks >= 0);
1168 APPEND_TRANSITION_COMMENT(" Acks remaining: ");
1169 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
// When all probe acks are in and completion hasn't fired yet, enqueue an
// AcksComplete trigger event; the TriggeredAcksComplete flag makes this
// one-shot.
1172 action(o_checkForCompletion, "o", desc="check for ack completion") {
1173 if (tbe.NumPendingAcks == 0 && tbe.TriggeredAcksComplete == false) {
1174 enqueue(triggerQueue_out, TriggerMsg, 1) {
1175 out_msg.addr := address;
1176 out_msg.Type := TriggerType:AcksComplete;
1178 tbe.TriggeredAcksComplete := true;
1180 APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
1181 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
// Same completion check but without enqueuing a trigger message — used on
// paths (LastCPUPrbResp) where the transition itself completes the probes.
1184 action(ont_checkForCompletionNoTrigger, "ont", desc="check for ack completion, no trigger") {
1185 if (tbe.NumPendingAcks == 0 && tbe.TriggeredAcksComplete == false) {
1186 tbe.TriggeredAcksComplete := true;
1188 APPEND_TRANSITION_COMMENT(" Check: Acks remaining: ");
1189 APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
// Drop the requestor from this block's VicDirty-ignore set (private path:
// peeks the core request network).
1192 action(rvp_removeVicDirtyIgnore, "rvp", desc="Remove ignored core") {
1193 peek(requestNetwork_in, CPURequestMsg) {
1194 getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
// Same, for requests arriving via the region directory queue.
1198 action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") {
1199 peek(regDir_in, CPURequestMsg) {
1200 getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor);
// Forward a core request to the region directory, preserving its type,
// requestor, shared flag, and message size.
1204 action(r_sendRequestToRegionDir, "r", desc="send request to Region Directory") {
1205 peek(requestNetwork_in, CPURequestMsg) {
1206 enqueue(requestNetworkReg_out, CPURequestMsg, 1) {
1207 out_msg.addr := address;
1208 out_msg.Type := in_msg.Type;
1209 out_msg.Requestor := in_msg.Requestor;
1210 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
1211 out_msg.Shared := in_msg.Shared;
1212 out_msg.MessageSize := in_msg.MessageSize;
1213 DPRINTF(RubySlicc, "out dest: %s\n", mapAddressToMachine(address, MachineType:RegionDir));
// Ack back to the region-buffer requestor that its invalidation has been
// ordered at the directory.
1218 action(ai_ackInvalidate, "ai", desc="Ack to let the reg-dir know that the inv is ordered") {
1219 peek(regBuf_in, CPURequestMsg) {
1220 enqueue(regAckNetwork_out, UnblockMsg, 1) {
1221 out_msg.addr := address;
1222 out_msg.Destination.add(in_msg.Requestor);
1223 out_msg.MessageSize := MessageSizeType:Response_Control;
1224 DPRINTF(RubySlicc, "ai out_msg: %s\n", out_msg);
// Ack an invalidation on behalf of a probe responder: routes the ack to
// region buffer 0 for CorePair senders, region buffer 1 otherwise.
// NOTE(review): the hard-coded intToID(0)/intToID(1) mapping looks
// single-socket-specific — confirm for multi-socket configurations.
1229 action(aic_ackInvalidate, "aic", desc="Ack to let the reg-dir know that the inv is ordered") {
1230 peek(responseNetwork_in, ResponseMsg) {
1231 if (in_msg.NoAckNeeded == false) {
1232 enqueue(regAckNetwork_out, UnblockMsg, 1) {
1233 out_msg.addr := address;
1234 if (machineIDToMachineType(in_msg.Sender) == MachineType:CorePair) {
1235 out_msg.Destination.add(createMachineID(MachineType:RegionBuffer, intToID(0)));
1237 out_msg.Destination.add(createMachineID(MachineType:RegionBuffer, intToID(1)));
1239 out_msg.MessageSize := MessageSizeType:Response_Control;
1240 DPRINTF(RubySlicc, "ai out_msg: %s\n", out_msg);
1241 out_msg.wasValid := in_msg.isValid;
// Install writeback data into L3. On a tag hit, update in place; otherwise
// evict a victim to memory if the set is full, then allocate a fresh entry.
1247 action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") {
1248 peek(responseNetwork_in, ResponseMsg) {
1249 if (L3CacheMemory.isTagPresent(address)) {
1250 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
1251 APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
1252 entry.DataBlk := in_msg.DataBlk;
1253 entry.LastSender := in_msg.Sender;
1255 if (L3CacheMemory.cacheAvail(address) == false) {
1256 Addr victim := L3CacheMemory.cacheProbe(address);
1257 CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
1258 L3CacheMemory.lookup(victim));
1259 queueMemoryWrite(machineID, victim, to_memory_controller_latency,
1260 victim_entry.DataBlk);
1261 L3CacheMemory.deallocate(victim);
1263 assert(L3CacheMemory.cacheAvail(address));
1264 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
1265 APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
1266 entry.DataBlk := in_msg.DataBlk;
1267 entry.LastSender := in_msg.Sender;
// Install write-through/atomic data into L3, but only when the useL3OnWT
// option is enabled. Same hit-update / evict-then-allocate pattern as
// al_allocateL3Block, sourcing data from the TBE.
1272 action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") {
1273 if ((tbe.wtData || tbe.atomicData) && useL3OnWT) {
1274 if (L3CacheMemory.isTagPresent(address)) {
1275 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
1276 APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
1277 entry.DataBlk := tbe.DataBlk;
1278 entry.LastSender := tbe.LastSender;
1280 if (L3CacheMemory.cacheAvail(address) == false) {
1281 Addr victim := L3CacheMemory.cacheProbe(address);
1282 CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
1283 L3CacheMemory.lookup(victim));
1284 queueMemoryWrite(machineID, victim, to_memory_controller_latency,
1285 victim_entry.DataBlk);
1286 L3CacheMemory.deallocate(victim);
1288 assert(L3CacheMemory.cacheAvail(address));
1289 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
1290 APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
1291 entry.DataBlk := tbe.DataBlk;
1292 entry.LastSender := tbe.LastSender;
// Install TBE data into L3 on a ForceInv path, only if the TBE holds dirty
// data. Same hit-update / evict-then-allocate pattern as al_allocateL3Block.
1297 action(ali_allocateL3Block, "ali", desc="allocate the L3 block on ForceInv") {
1298 if (tbe.Dirty == true) {
1299 if (L3CacheMemory.isTagPresent(address)) {
1300 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
1301 APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) ");
1302 entry.DataBlk := tbe.DataBlk;
1303 entry.LastSender := tbe.LastSender;
1305 if (L3CacheMemory.cacheAvail(address) == false) {
1306 Addr victim := L3CacheMemory.cacheProbe(address);
1307 CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
1308 L3CacheMemory.lookup(victim));
1309 queueMemoryWrite(machineID, victim, to_memory_controller_latency,
1310 victim_entry.DataBlk);
1311 L3CacheMemory.deallocate(victim);
1313 assert(L3CacheMemory.cacheAvail(address));
1314 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
1315 APPEND_TRANSITION_COMMENT(" al wrote data to L3 ");
1316 entry.DataBlk := tbe.DataBlk;
1317 entry.LastSender := tbe.LastSender;
// TBE-less ForceInv L3 install: data/sender come from the incoming probe
// response message instead of a TBE. Same hit / evict / allocate pattern.
1322 action(ali_allocateL3BlockNoTBE, "alt", desc="allocate the L3 block on ForceInv no TBE") {
1323 peek(responseNetwork_in, ResponseMsg) {
1325 if (L3CacheMemory.isTagPresent(address)) {
1326 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address));
1327 APPEND_TRANSITION_COMMENT(" ali wrote data to L3 (hit) ");
1328 entry.DataBlk := in_msg.DataBlk;
1329 entry.LastSender := in_msg.Sender;
1331 if (L3CacheMemory.cacheAvail(address) == false) {
1332 Addr victim := L3CacheMemory.cacheProbe(address);
1333 CacheEntry victim_entry := static_cast(CacheEntry, "pointer",
1334 L3CacheMemory.lookup(victim));
1335 queueMemoryWrite(machineID, victim, to_memory_controller_latency,
1336 victim_entry.DataBlk);
1337 L3CacheMemory.deallocate(victim);
1339 assert(L3CacheMemory.cacheAvail(address));
1340 CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry));
1341 APPEND_TRANSITION_COMMENT(" ali wrote data to L3 ");
1342 entry.DataBlk := in_msg.DataBlk;
1343 entry.LastSender := in_msg.Sender;
// Drop the L3 entry for this address without writing it anywhere.
1349 action(dl_deallocateL3, "dl", desc="deallocate the L3 block") {
1350 L3CacheMemory.deallocate(address);
// --- Queue-management actions: dequeue/recycle the various in-ports, ---
// --- stall-and-wait on an address, and wake dependent requests.       ---
1353 action(p_popRequestQueue, "p", desc="pop request queue") {
1354 requestNetwork_in.dequeue(clockEdge());
1357 action(prd_popRegionQueue, "prd", desc="pop request queue") {
1358 regDir_in.dequeue(clockEdge());
1361 action(prb_popRegionBufQueue, "prb", desc="pop request queue") {
1362 regBuf_in.dequeue(clockEdge());
1365 action(pr_popResponseQueue, "pr", desc="pop response queue") {
1366 responseNetwork_in.dequeue(clockEdge());
1369 action(pm_popMemQueue, "pm", desc="pop mem queue") {
1370 memQueue_in.dequeue(clockEdge());
1373 action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
1374 triggerQueue_in.dequeue(clockEdge());
1377 action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") {
1378 L3TriggerQueue_in.dequeue(clockEdge());
1381 action(pu_popUnblockQueue, "pu", desc="pop unblock queue") {
1382 unblockNetwork_in.dequeue(clockEdge());
// Recycle rather than pop: used when a message must be retried later
// because probe responses and data share a network.
1385 action(yy_recycleResponseQueue, "yy", desc="recycle response queue") {
1386 responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
1389 action(ww_stallAndWaitRegRequestQueue, "ww", desc="recycle region dir request queue") {
1390 stall_and_wait(regDir_in, address);
1393 action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") {
1394 stall_and_wait(requestNetwork_in, address);
1397 action(wa_wakeUpDependents, "wa", desc="Wake up any requests waiting for this address") {
1398 wakeUpBuffers(address);
1401 action(wa_wakeUpAllDependents, "waa", desc="Wake up any requests waiting for this region") {
// Explicit no-op stall action.
1405 action(z_stall, "z", desc="...") {
1410 // transitions from U
// While any transient state is active, region Inv/Downgrade and new demand
// requests are stalled until the in-flight transaction finishes.
1412 transition({BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Inv, Downgrade}) {
1413 ww_stallAndWaitRegRequestQueue;
// From U, a region invalidation probes (and collects data from) the sharers.
1416 transition(U, Inv, BI){L3TagArrayRead} {
1418 dcr_probeInvCoreData; // only need to invalidate sharers
1420 prb_popRegionBufQueue;
1423 transition(U, Downgrade, BI){L3TagArrayRead} {
1425 ddr_probeDownCoreData; // only need to invalidate sharers
1427 prb_popRegionBufQueue;
1430 // The next 2 transitions are needed in the event that an invalidation
1431 // is waiting for its ack from the core, but the event makes it through
1432 // the region directory before the acks. This wouldn't be needed if
1433 // we waited to ack the region dir until the directory got all the acks
1434 transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, WriteThrough, Atomic}) {
1435 ww_stallAndWaitRegRequestQueue;
1438 transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {RdBlkSP, RdBlkMP, RdBlkP}) {
1439 st_stallAndWaitRequest;
1442 transition({BR, BW, BI, BL, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {WriteThroughP,AtomicP}) {
1443 st_stallAndWaitRequest;
// Demand requests arriving at U via the region directory move to the
// *_PM (probe + memory pending) states; private (P-suffixed) requests
// skip the region dir and go straight to the *_M (memory pending) states.
1446 transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} {
1450 o_checkForCompletion;
1455 transition(U, WriteThrough, BM_PM){L3TagArrayRead} {
1457 w_sendResponseWBAck;
1460 o_checkForCompletion;
1465 transition(U, {RdBlkM,Atomic}, BM_PM){L3TagArrayRead} {
1469 o_checkForCompletion;
1474 transition(U, RdBlk, B_PM){L3TagArrayRead} {
1478 o_checkForCompletion;
1483 transition(U, {RdBlkSP}, BS_M) {L3TagArrayRead} {
1489 transition(U, WriteThroughP, BM_M) {L3TagArrayRead} {
1491 wp_sendResponseWBAckP;
1496 transition(U, {RdBlkMP,AtomicP}, BM_M) {L3TagArrayRead} {
1502 transition(U, RdBlkP, B_M) {L3TagArrayRead} {
// Private victim writebacks from U move to BL and get an immediate WB ack.
1508 transition(U, VicDirtyP, BL) {L3TagArrayRead} {
1510 wp_sendResponseWBAckP;
1514 transition(U, VicCleanP, BL) {L3TagArrayRead} {
1516 wp_sendResponseWBAckP;
// While probes are pending (B*_Pm/BP), private requests are serviced by
// bypass responses from the TBE data: shared (sb), modified (mb),
// write-through (mbwt after committing the data), or exclusive/shared (esb).
// Each moves to the corresponding *_B state awaiting the core's unblock.
1520 transition(BM_Pm, RdBlkSP, BM_Pm_B) {L3DataArrayWrite} {
1521 sb_sendResponseSBypass;
1525 transition(BS_Pm, RdBlkSP, BS_Pm_B) {L3DataArrayWrite} {
1526 sb_sendResponseSBypass;
1530 transition(B_Pm, RdBlkSP, B_Pm_B) {L3DataArrayWrite} {
1531 sb_sendResponseSBypass;
1535 transition(BP, RdBlkSP, BP_B) {L3DataArrayWrite} {
1536 sb_sendResponseSBypass;
1540 transition(BM_Pm, RdBlkMP, BM_Pm_B) {L3DataArrayWrite} {
1541 mb_sendResponseMBypass;
1545 transition(BS_Pm, RdBlkMP, BS_Pm_B) {L3DataArrayWrite} {
1546 mb_sendResponseMBypass;
1550 transition(B_Pm, RdBlkMP, B_Pm_B) {L3DataArrayWrite} {
1551 mb_sendResponseMBypass;
1555 transition(BP, RdBlkMP, BP_B) {L3DataArrayWrite} {
1556 mb_sendResponseMBypass;
1560 transition(BM_Pm, {WriteThroughP,AtomicP}, BM_Pm_B) {L3DataArrayWrite} {
1561 wdp_writeBackDataPrivate;
1562 mbwt_sendResponseWriteThroughBypass;
1566 transition(BS_Pm, {WriteThroughP,AtomicP}, BS_Pm_B) {L3DataArrayWrite} {
1567 wdp_writeBackDataPrivate;
1568 mbwt_sendResponseWriteThroughBypass;
1572 transition(B_Pm, {WriteThroughP,AtomicP}, B_Pm_B) {L3DataArrayWrite} {
1573 wdp_writeBackDataPrivate;
1574 mbwt_sendResponseWriteThroughBypass;
1578 transition(BP, {WriteThroughP,AtomicP}, BP_B) {L3DataArrayWrite} {
1579 wdp_writeBackDataPrivate;
1580 mbwt_sendResponseWriteThroughBypass;
1584 transition(BM_Pm, RdBlkP, BM_Pm_B) {L3DataArrayWrite} {
1585 esb_sendResponseESBypass;
1589 transition(BS_Pm, RdBlkP, BS_Pm_B) {L3DataArrayWrite} {
1590 esb_sendResponseESBypass;
1594 transition(B_Pm, RdBlkP, B_Pm_B) {L3DataArrayWrite}{
1595 esb_sendResponseESBypass;
1599 transition(BP, RdBlkP, BP_B) {L3DataArrayWrite}{
1600 esb_sendResponseESBypass;
// The core's unblock (or the internal write-through unblock) returns each
// *_B state to its underlying probe-pending state and wakes waiters.
1604 transition(BM_Pm_B, CoreUnblock, BM_Pm) {
1605 wa_wakeUpDependents;
1609 transition(BS_Pm_B, CoreUnblock, BS_Pm) {
1610 wa_wakeUpDependents;
1614 transition(B_Pm_B, CoreUnblock, B_Pm) {
1615 wa_wakeUpDependents;
1619 transition(BP_B, CoreUnblock, BP) {
1620 wa_wakeUpDependents;
1624 transition(BM_Pm_B, UnblockWriteThrough, BM_Pm) {
1625 wa_wakeUpDependents;
1629 transition(BS_Pm_B, UnblockWriteThrough, BS_Pm) {
1630 wa_wakeUpDependents;
1634 transition(B_Pm_B, UnblockWriteThrough, B_Pm) {
1635 wa_wakeUpDependents;
1639 transition(BP_B, UnblockWriteThrough, BP) {
1640 wa_wakeUpDependents;
// Private victim writebacks arriving while probes are pending: ack the
// writeback and move to the corresponding *_BL state to await CPUData.
1644 transition(BM_Pm, VicDirtyP, BM_Pm_BL) {
1645 wp_sendResponseWBAckP;
1649 transition(BS_Pm, VicDirtyP, BS_Pm_BL) {
1650 wp_sendResponseWBAckP;
1654 transition(B_Pm, VicDirtyP, B_Pm_BL) {
1655 wp_sendResponseWBAckP;
1659 transition(BP, VicDirtyP, BP_BL) {
1660 wp_sendResponseWBAckP;
1664 transition(BM_Pm, VicCleanP, BM_Pm_BL) {
1665 wp_sendResponseWBAckP;
1669 transition(BS_Pm, VicCleanP, BS_Pm_BL) {
1670 wp_sendResponseWBAckP;
1674 transition(B_Pm, VicCleanP, B_Pm_BL) {
1675 wp_sendResponseWBAckP;
1679 transition(BP, VicCleanP, BP_BL) {
1680 wp_sendResponseWBAckP;
// Writeback data arrives while in a *_BL state: record it in the TBE,
// commit it to the directory, wake waiters, and return to the underlying
// probe-pending state.
1684 transition(BM_Pm_BL, CPUData, BM_Pm) {
1685 yc_writeCPUDataToTBE;
1686 d_writeDataToMemory;
1687 wa_wakeUpDependents;
1688 pr_popResponseQueue;
1691 transition(BS_Pm_BL, CPUData, BS_Pm) {
1692 yc_writeCPUDataToTBE;
1693 d_writeDataToMemory;
1694 wa_wakeUpDependents;
1695 pr_popResponseQueue;
1698 transition(B_Pm_BL, CPUData, B_Pm) {
1699 yc_writeCPUDataToTBE;
1700 d_writeDataToMemory;
1701 wa_wakeUpDependents;
1702 pr_popResponseQueue;
1705 transition(BP_BL, CPUData, BP) {
1706 yc_writeCPUDataToTBE;
1707 d_writeDataToMemory;
1708 wa_wakeUpDependents;
1709 pr_popResponseQueue;
// Victims colliding with an in-flight BR/BW/BL transaction are stalled on
// the appropriate queue (private on the request net, region-dir otherwise).
1712 transition({BR, BW, BL}, {VicDirtyP, VicCleanP}) {
1713 st_stallAndWaitRequest;
1716 transition({BR, BW, BL}, {VicDirty, VicClean}) {
1717 ww_stallAndWaitRegRequestQueue;
// Writeback completes: commit data, wake waiters, back to U.
1720 transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} {
1722 d_writeDataToMemory;
1724 wa_wakeUpDependents;
1725 pr_popResponseQueue;
// Stale writeback (data already superseded): no memory write, just unwind.
1728 transition(BL, StaleWB, U) {L3TagArrayWrite} {
1730 wa_wakeUpAllDependents;
1731 pr_popResponseQueue;
// Victims in any other transient state are stalled until the transaction
// drains; stale VicDirty notifications are acked and the ignore entry
// removed in ANY state.
1734 transition({BI, B, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {VicDirty, VicClean}) {
1735 ww_stallAndWaitRegRequestQueue;
1738 transition({BI, B, BS_M, BM_M, B_M, BS_PM, BM_PM, B_PM, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {VicDirtyP, VicCleanP}) {
1739 st_stallAndWaitRequest;
1742 transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, WBAck) {
1746 transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, StaleVicDirtyP) {
1747 rvp_removeVicDirtyIgnore;
1748 wp_sendResponseWBAckP;
1752 transition({U, BR, BW, BL, BI, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B, BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, StaleVicDirty) {
1753 rv_removeVicDirtyIgnore;
1754 w_sendResponseWBAck;
// Region-dir victims from U: ack and await the data in BL.
1758 transition(U, VicDirty, BL) {L3TagArrayRead} {
1761 w_sendResponseWBAck;
1765 transition(U, VicClean, BL) {L3TagArrayRead} {
1768 w_sendResponseWBAck;
// Final unblock after a completed read/write returns the block to U.
1772 transition({B, BR}, CoreUnblock, U) {
1773 wa_wakeUpDependents;
1777 transition({B, BR}, UnblockWriteThrough, U) {
1778 wa_wakeUpDependents;
// Memory data arrival. In *_M (probes done) the response completes and may
// fill L3 on write-through; in *_PM (probes still pending) it only stores
// the data in the TBE and wakes waiters, moving to *_Pm.
1782 transition(BS_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} {
1783 mt_writeMemDataToTBE;
1786 alwt_allocateL3BlockOnWT;
1791 transition(BM_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
1792 mt_writeMemDataToTBE;
1795 alwt_allocateL3BlockOnWT;
1800 transition(B_M, MemData, B){L3TagArrayWrite, L3DataArrayWrite} {
1801 mt_writeMemDataToTBE;
1804 alwt_allocateL3BlockOnWT;
1809 transition(BS_PM, MemData, BS_Pm) {} {
1810 mt_writeMemDataToTBE;
1811 wa_wakeUpDependents;
1815 transition(BM_PM, MemData, BM_Pm){} {
1816 mt_writeMemDataToTBE;
1817 wa_wakeUpDependents;
1821 transition(B_PM, MemData, B_Pm){} {
1822 mt_writeMemDataToTBE;
1823 wa_wakeUpDependents;
// L3 hit trigger arrival: mirrors the MemData transitions — completes
// *_M states (with optional write-through L3 fill) and advances *_PM
// states to *_Pm while probes drain.
1827 transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
1830 alwt_allocateL3BlockOnWT;
1832 ptl_popTriggerQueue;
1835 transition(BM_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
1838 alwt_allocateL3BlockOnWT;
1840 ptl_popTriggerQueue;
1843 transition(B_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} {
1846 alwt_allocateL3BlockOnWT;
1848 ptl_popTriggerQueue;
1851 transition(BS_PM, L3Hit, BS_Pm) {
1852 wa_wakeUpDependents;
1853 ptl_popTriggerQueue;
1856 transition(BM_PM, L3Hit, BM_Pm) {
1857 wa_wakeUpDependents;
1858 ptl_popTriggerQueue;
1861 transition(B_PM, L3Hit, B_Pm) {
1862 wa_wakeUpDependents;
1863 ptl_popTriggerQueue;
// Probe responses while acks are still outstanding: record the data and
// check completion. Completed (*_B / B / *_M) states recycle the message
// since probe responses and data share a network; in U and BL the response
// carries invalidation writeback data handled without/with a TBE.
1866 transition({BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, BP, BI}, CPUPrbResp) {
1868 y_writeProbeDataToTBE;
1870 ont_checkForCompletionNoTrigger;
1871 pr_popResponseQueue;
1874 transition({B, B_M, BS_M, BM_M}, {CPUPrbResp, LastCPUPrbResp}) {
1878 transition({BS_Pm_BL, BM_Pm_BL, B_Pm_BL, BP_BL, BS_Pm_B, BM_Pm_B, B_Pm_B, BP_B}, {CPUPrbResp, LastCPUPrbResp}) {
1879 // recycling because PrbResponse and data come on the same network
1880 yy_recycleResponseQueue;
1883 transition(U, {CPUPrbResp, LastCPUPrbResp}) {L3TagArrayRead, L3DataArrayWrite} {
1885 wdt_writeBackDataInvNoTBE;
1886 ali_allocateL3BlockNoTBE;
1887 pr_popResponseQueue;
1890 transition(BL, {CPUPrbResp, LastCPUPrbResp}) {} {
1892 y_writeProbeDataToTBE;
1893 wdi_writeBackDataInv;
1894 ali_allocateL3Block;
1895 pr_popResponseQueue;
// Final probe response (or the AcksComplete trigger) in the *_PM states:
// probes are done but memory is still pending, so advance to *_M.
1898 transition(BS_PM, LastCPUPrbResp, BS_M) {
1900 y_writeProbeDataToTBE;
1902 ont_checkForCompletionNoTrigger;
1903 pr_popResponseQueue;
1906 transition(BS_PM, ProbeAcksComplete, BS_M) {} {
1910 transition(BM_PM, LastCPUPrbResp, BM_M) {
1912 y_writeProbeDataToTBE;
1914 ont_checkForCompletionNoTrigger;
1915 pr_popResponseQueue;
1918 transition(BM_PM, ProbeAcksComplete, BM_M) {} {
1922 transition(B_PM, LastCPUPrbResp, B_M) {
1924 y_writeProbeDataToTBE;
1926 ont_checkForCompletionNoTrigger;
1927 pr_popResponseQueue;
1930 transition(B_PM, ProbeAcksComplete, B_M){} {
// Final probe response / AcksComplete in the *_Pm and BP states: memory
// data already arrived, so the transaction completes to B, optionally
// filling L3 (write-through and/or ForceInv paths).
1934 transition(BS_Pm, LastCPUPrbResp, B) {
1936 y_writeProbeDataToTBE;
1938 ont_checkForCompletionNoTrigger;
1941 alwt_allocateL3BlockOnWT;
1942 ali_allocateL3Block;
1944 pr_popResponseQueue;
1947 transition(BS_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
1950 alwt_allocateL3BlockOnWT;
1951 ali_allocateL3Block;
1956 transition(BM_Pm, LastCPUPrbResp, B) {
1958 y_writeProbeDataToTBE;
1960 ont_checkForCompletionNoTrigger;
1963 alwt_allocateL3BlockOnWT;
1964 ali_allocateL3Block;
1966 pr_popResponseQueue;
1969 transition(BM_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
1972 alwt_allocateL3BlockOnWT;
1973 ali_allocateL3Block;
1978 transition(B_Pm, LastCPUPrbResp, B) {
1980 y_writeProbeDataToTBE;
1982 ont_checkForCompletionNoTrigger;
1985 alwt_allocateL3BlockOnWT;
1986 ali_allocateL3Block;
1988 pr_popResponseQueue;
1991 transition(B_Pm, ProbeAcksComplete, B){L3DataArrayWrite, L3TagArrayWrite} {
1994 alwt_allocateL3BlockOnWT;
1995 ali_allocateL3Block;
2000 transition(BP, LastCPUPrbResp, B) {
2002 y_writeProbeDataToTBE;
2004 ont_checkForCompletionNoTrigger;
2007 alwt_allocateL3BlockOnWT;
2009 pr_popResponseQueue;
// NOTE(review): annotation below lists L3TagArrayWrite twice; sibling
// transitions use {L3DataArrayWrite, L3TagArrayWrite} — likely a typo
// for L3DataArrayWrite. Verify before changing (affects resource checks).
2012 transition(BP, ProbeAcksComplete, B){L3TagArrayWrite, L3TagArrayWrite} {
2015 alwt_allocateL3BlockOnWT;
// Region invalidation (BI) completes: write back dirty data, optionally
// fill L3, wake waiters, and return to U.
2020 transition(BI, LastCPUPrbResp, B) {
2022 y_writeProbeDataToTBE;
2024 ont_checkForCompletionNoTrigger;
2025 wa_wakeUpDependents;
2026 wdi_writeBackDataInv;
2027 ali_allocateL3Block;
2029 pr_popResponseQueue;
2032 transition(BI, ProbeAcksComplete, U) {L3TagArrayWrite, L3DataArrayWrite}{
2033 wa_wakeUpDependents;
2034 wdi_writeBackDataInv;
2035 ali_allocateL3Block;