2 * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
5 * For use for simulation and test purposes only
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
33 * Author: Sooraj Puthoor, Blake Hechtman
37 * This file is inherited from GPU_VIPER-TCC.sm and retains its structure.
38 * There are very few modifications in this file from the original VIPER TCC
41 machine(MachineType:TCC, "TCC Cache")
// Configuration parameters: the backing cache array, the writeback-mode
// flag, and fixed latencies for messages issued by this controller.
42 : CacheMemory * L2cache;
43 bool WB; /*is this cache Writeback?*/
45 Cycles l2_request_latency := 50;
46 Cycles l2_response_latency := 20;
48 // From the TCPs or SQCs
49 MessageBuffer * requestFromTCP, network="From", virtual_network="1", ordered="true", vnet_type="request";
50 // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
51 MessageBuffer * responseToCore, network="To", virtual_network="3", ordered="true", vnet_type="response";
// Downward-facing buffers to/from the NB (directory) side of the hierarchy.
53 MessageBuffer * probeFromNB, network="From", virtual_network="0", ordered="false", vnet_type="request";
54 MessageBuffer * responseFromNB, network="From", virtual_network="2", ordered="false", vnet_type="response";
56 MessageBuffer * requestToNB, network="To", virtual_network="0", ordered="false", vnet_type="request";
57 MessageBuffer * responseToNB, network="To", virtual_network="2", ordered="false", vnet_type="response";
58 MessageBuffer * unblockToNB, network="To", virtual_network="4", ordered="false", vnet_type="unblock";
// Internal, ordered trigger queue used to sequence atomic-completion events.
60 MessageBuffer * triggerQueue, ordered="true", random="false";
// Events handled by the TCC state machine.
63 enumeration(Event, desc="TCC Events") {
64 // Requests coming from the Cores
65 RdBlk, desc="RdBlk event";
66 WrVicBlk, desc="L1 Write Through";
67 WrVicBlkBack, desc="L1 Write Back(dirty cache)";
68 Atomic, desc="Atomic Op";
69 AtomicDone, desc="AtomicOps Complete";
70 AtomicNotDone, desc="AtomicOps not Complete";
71 Data, desc="data message";
72 // Coming from this TCC
73 L2_Repl, desc="L2 Replacement";
75 PrbInv, desc="Invalidating probe";
76 // Coming from Memory Controller
77 WBAck, desc="writethrough ack from memory";
// Stable and transient states; default (unallocated) state is I.
81 state_declaration(State, desc="TCC State", default="TCC_State_I") {
82 M, AccessPermission:Read_Write, desc="Modified(dirty cache only)";
83 W, AccessPermission:Read_Write, desc="Written(dirty cache only)";
84 V, AccessPermission:Read_Only, desc="Valid";
85 I, AccessPermission:Invalid, desc="Invalid";
86 IV, AccessPermission:Busy, desc="Waiting for Data";
87 WI, AccessPermission:Busy, desc="Waiting on Writethrough Ack";
88 A, AccessPermission:Busy, desc="Invalid waiting on atomic Data";
// Array-access classes reported to recordStats by transitions.
91 enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
92 DataArrayRead, desc="Read the data array";
93 DataArrayWrite, desc="Write the data array";
94 TagArrayRead, desc="Read the tag array";
95 TagArrayWrite, desc="Write the tag array";
// Per-line cache entry: state, dirtiness, data, and a per-byte dirty mask
// used for partial (write-through) writes.
101 structure(Entry, desc="...", interface="AbstractCacheEntry") {
102 State CacheState, desc="cache state";
103 bool Dirty, desc="Is the data dirty (diff from memory?)";
104 DataBlock DataBlk, desc="Data for the block";
105 WriteMask writeMask, desc="Dirty byte mask";
// Transaction Buffer Entry: per-address transient request bookkeeping.
108 structure(TBE, desc="...") {
109 State TBEState, desc="Transient state";
110 DataBlock DataBlk, desc="data for the block";
111 bool Dirty, desc="Is the data dirty?";
112 bool Shared, desc="Victim hit by shared probe";
113 MachineID From, desc="Waiting for writeback from...";
114 NetDest Destination, desc="Data destination";
115 int numAtomics, desc="number remaining atomics";
// Externally-implemented (C++) table of TBEs keyed by address.
118 structure(TBETable, external="yes") {
121 void deallocate(Addr);
122 bool isPresent(Addr);
125 TBETable TBEs, template="<TCC_TBE>", constructor="m_number_of_TBEs";
// SLICC-provided hooks for binding/unbinding the current cache entry.
127 void set_cache_entry(AbstractCacheEntry b);
128 void unset_cache_entry();
131 void wakeUpAllBuffers();
132 void wakeUpBuffers(Addr a);
134 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
136 // FUNCTION DEFINITIONS
139 Tick cyclesToTicks(Cycles c);
// Peer for this TCC on the NB side: the RegionBuffer that filters/forwards
// its traffic. NOTE(review): regionBufferNum is a machine parameter not
// visible in this chunk -- confirm it is declared in the parameter list.
141 MachineID getPeer(MachineID mach) {
142 return createMachineID(MachineType:RegionBuffer, intToID(regionBufferNum));
// Look up the cache entry for addr; may return an invalid (null) pointer.
145 Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
146 return static_cast(Entry, "pointer", L2cache.lookup(addr));
// Direct reference to the cached data block (caller must ensure presence).
149 DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
150 return getCacheEntry(addr).DataBlk;
// True if addr already has a tag or a free way exists (no eviction needed).
153 bool presentOrAvail(Addr addr) {
154 return L2cache.isTagPresent(addr) || L2cache.cacheAvail(addr);
// Current protocol state for addr: TBE state takes priority over the
// cache entry's state (the TBE branch is elided in this chunk).
157 State getState(TBE tbe, Entry cache_entry, Addr addr) {
160 } else if (is_valid(cache_entry)) {
161 return cache_entry.CacheState;
// Record the new state in both the TBE (if any) and the cache entry.
166 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
168 tbe.TBEState := state;
171 if (is_valid(cache_entry)) {
172 cache_entry.CacheState := state;
// Functional (debug) read: prefer in-flight TBE data, else fall back to
// a functional read of backing memory.
176 void functionalRead(Addr addr, Packet *pkt) {
177 TBE tbe := TBEs.lookup(addr);
179 testAndRead(addr, tbe.DataBlk, pkt);
181 functionalMemoryRead(pkt);
// Functional (debug) write: update TBE data if present and backing
// memory; returns the number of locations written.
185 int functionalWrite(Addr addr, Packet *pkt) {
186 int num_functional_writes := 0;
188 TBE tbe := TBEs.lookup(addr);
190 num_functional_writes := num_functional_writes +
191 testAndWrite(addr, tbe.DataBlk, pkt);
194 num_functional_writes := num_functional_writes +
195 functionalMemoryWrite(pkt);
196 return num_functional_writes;
// Access permission for addr: TBE transient state wins over the cache
// entry's state; NotPresent when neither exists.
199 AccessPermission getAccessPermission(Addr addr) {
200 TBE tbe := TBEs.lookup(addr);
202 return TCC_State_to_permission(tbe.TBEState);
205 Entry cache_entry := getCacheEntry(addr);
206 if(is_valid(cache_entry)) {
207 return TCC_State_to_permission(cache_entry.CacheState);
210 return AccessPermission:NotPresent;
// Push the permission implied by the new state into the cache entry.
213 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
214 if (is_valid(cache_entry)) {
215 cache_entry.changePermission(TCC_State_to_permission(state));
// Forward transition-declared array accesses to the cache's statistics.
219 void recordRequestType(RequestType request_type, Addr addr) {
220 if (request_type == RequestType:DataArrayRead) {
221 L2cache.recordRequestType(CacheRequestType:DataArrayRead,addr);
222 } else if (request_type == RequestType:DataArrayWrite) {
223 L2cache.recordRequestType(CacheRequestType:DataArrayWrite,addr);
224 } else if (request_type == RequestType:TagArrayRead) {
225 L2cache.recordRequestType(CacheRequestType:TagArrayRead,addr);
226 } else if (request_type == RequestType:TagArrayWrite) {
227 L2cache.recordRequestType(CacheRequestType:TagArrayWrite,addr);
// Bank/port availability check for the tag or data array; transitions
// stall when the required resource is busy.
231 bool checkResourceAvailable(RequestType request_type, Addr addr) {
232 if (request_type == RequestType:DataArrayRead) {
233 return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
234 } else if (request_type == RequestType:DataArrayWrite) {
235 return L2cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
236 } else if (request_type == RequestType:TagArrayRead) {
237 return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
238 } else if (request_type == RequestType:TagArrayWrite) {
239 return L2cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
241 error("Invalid RequestType type in checkResourceAvailable");
249 // Three classes of ports
250 // Class 1: downward facing network links to NB
251 out_port(requestToNB_out, CPURequestMsg, requestToNB);
252 out_port(responseToNB_out, ResponseMsg, responseToNB);
253 out_port(unblockToNB_out, UnblockMsg, unblockToNB);
255 // Class 2: upward facing ports to GPU cores
256 out_port(responseToCore_out, ResponseMsg, responseToCore);
// Class 3: internal trigger port (atomic-completion events).
258 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
260 // request queue going to NB
// Internal trigger port: fires AtomicDone once all outstanding atomics
// for the line have drained, AtomicNotDone otherwise.
// Fixed: message type was misspelled "TiggerMsg"; the peek below already
// uses TriggerMsg, so the port declaration now matches.
265 in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
266 if (triggerQueue_in.isReady(clockEdge())) {
267 peek(triggerQueue_in, TriggerMsg) {
268 TBE tbe := TBEs.lookup(in_msg.addr);
269 Entry cache_entry := getCacheEntry(in_msg.addr);
270 if (tbe.numAtomics == 0) {
271 trigger(Event:AtomicDone, in_msg.addr, cache_entry, tbe);
273 trigger(Event:AtomicNotDone, in_msg.addr, cache_entry, tbe);
// Responses from the NB/directory: data fills (may force an eviction when
// no way is free) and writethrough acks.
281 in_port(responseFromNB_in, ResponseMsg, responseFromNB) {
282 if (responseFromNB_in.isReady(clockEdge())) {
283 peek(responseFromNB_in, ResponseMsg, block_on="addr") {
284 TBE tbe := TBEs.lookup(in_msg.addr);
285 Entry cache_entry := getCacheEntry(in_msg.addr);
286 if (in_msg.Type == CoherenceResponseType:NBSysResp) {
287 if(presentOrAvail(in_msg.addr)) {
288 trigger(Event:Data, in_msg.addr, cache_entry, tbe);
// No free way: evict a victim first, then retry this fill.
290 Addr victim := L2cache.cacheProbe(in_msg.addr);
291 trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
293 } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
294 trigger(Event:WBAck, in_msg.addr, cache_entry, tbe);
296 error("Unexpected Response Message to Core");
302 // Finally handling incoming requests (from TCP) and probes (from NB).
// Probes from the NB: every probe is treated as an invalidating probe.
304 in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB) {
305 if (probeNetwork_in.isReady(clockEdge())) {
306 peek(probeNetwork_in, NBProbeRequestMsg) {
307 DPRINTF(RubySlicc, "%s\n", in_msg);
308 DPRINTF(RubySlicc, "machineID: %s\n", machineID);
309 Entry cache_entry := getCacheEntry(in_msg.addr);
310 TBE tbe := TBEs.lookup(in_msg.addr);
311 trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
// Requests from the TCPs/SQCs (highest rank). WriteThrough dispatches to
// WrVicBlkBack (writeback mode, may require eviction) or WrVicBlk
// (write-no-allocate path); Atomic and RdBlk map directly to events.
317 in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
318 if (coreRequestNetwork_in.isReady(clockEdge())) {
319 peek(coreRequestNetwork_in, CPURequestMsg) {
320 TBE tbe := TBEs.lookup(in_msg.addr);
321 Entry cache_entry := getCacheEntry(in_msg.addr);
322 if (in_msg.Type == CoherenceRequestType:WriteThrough) {
324 if(presentOrAvail(in_msg.addr)) {
325 trigger(Event:WrVicBlkBack, in_msg.addr, cache_entry, tbe);
// No free way: evict a victim before accepting the writeback.
327 Addr victim := L2cache.cacheProbe(in_msg.addr);
328 trigger(Event:L2_Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
331 trigger(Event:WrVicBlk, in_msg.addr, cache_entry, tbe);
333 } else if (in_msg.Type == CoherenceRequestType:Atomic) {
334 trigger(Event:Atomic, in_msg.addr, cache_entry, tbe);
335 } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
336 trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
338 DPRINTF(RubySlicc, "%s\n", in_msg);
339 error("Unexpected Request Message to TCC");
// Drop the block from the cache array (if currently allocated).
346 action(i_invL2, "i", desc="invalidate TCC cache block") {
347 if (is_valid(cache_entry)) {
348 L2cache.deallocate(address);
353 // Data available at TCC. Send the DATA to TCP
354 action(sd_sendData, "sd", desc="send Shared response") {
355 peek(coreRequestNetwork_in, CPURequestMsg) {
356 enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
357 out_msg.addr := address;
358 out_msg.Type := CoherenceResponseType:TDSysResp;
359 out_msg.Sender := machineID;
360 out_msg.Destination.add(in_msg.Requestor);
361 out_msg.DataBlk := cache_entry.DataBlk;
362 out_msg.MessageSize := MessageSizeType:Response_Data;
363 out_msg.Dirty := false;
364 out_msg.State := CoherenceState:Shared;
365 DPRINTF(RubySlicc, "%s\n", out_msg);
371 // Data was not available at TCC. So, TCC forwarded the request to
372 // directory and directory responded back with data. Now, forward the
373 // DATA to TCP and send the unblock ack back to directory.
374 action(sdr_sendDataResponse, "sdr", desc="send Shared response") {
375 enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
376 out_msg.addr := address;
377 out_msg.Type := CoherenceResponseType:TDSysResp;
378 out_msg.Sender := machineID;
// Fan out to every requestor recorded in the TBE while the miss was
// outstanding.
379 out_msg.Destination := tbe.Destination;
380 out_msg.DataBlk := cache_entry.DataBlk;
381 out_msg.MessageSize := MessageSizeType:Response_Data;
382 out_msg.Dirty := false;
383 out_msg.State := CoherenceState:Shared;
384 DPRINTF(RubySlicc, "%s\n", out_msg);
386 enqueue(unblockToNB_out, UnblockMsg, 1) {
387 out_msg.addr := address;
388 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
389 out_msg.MessageSize := MessageSizeType:Unblock_Control;
390 DPRINTF(RubySlicc, "%s\n", out_msg);
// Miss in L2: forward the original request downstream via the region
// buffer peer, but only for the first requestor (Destination.count()==1).
395 action(rd_requestData, "r", desc="Miss in L2, pass on") {
396 if(tbe.Destination.count()==1){
397 peek(coreRequestNetwork_in, CPURequestMsg) {
398 enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
399 out_msg.addr := address;
400 out_msg.Type := in_msg.Type;
401 out_msg.Requestor := machineID;
402 out_msg.Destination.add(getPeer(machineID));
403 out_msg.Shared := false; // unneeded for this request
404 out_msg.MessageSize := in_msg.MessageSize;
405 DPRINTF(RubySlicc, "%s\n", out_msg);
// Relay the memory-side writethrough ack up to the original WT requestor.
411 action(w_sendResponseWBAck, "w", desc="send WB Ack") {
412 peek(responseFromNB_in, ResponseMsg) {
413 enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
414 out_msg.addr := address;
415 out_msg.Type := CoherenceResponseType:TDSysWBAck;
416 out_msg.Destination.clear();
417 out_msg.Destination.add(in_msg.WTRequestor);
418 out_msg.Sender := machineID;
419 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Ack a writeback locally (TCC absorbed the data; no memory ack needed).
424 action(swb_sendWBAck, "swb", desc="send WB Ack") {
425 peek(coreRequestNetwork_in, CPURequestMsg) {
426 enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
427 out_msg.addr := address;
428 out_msg.Type := CoherenceResponseType:TDSysWBAck;
429 out_msg.Destination.clear();
430 out_msg.Destination.add(in_msg.Requestor);
431 out_msg.Sender := machineID;
432 out_msg.MessageSize := MessageSizeType:Writeback_Control;
// Return atomic result data from the NB response to the WT requestor.
437 action(ar_sendAtomicResponse, "ar", desc="send Atomic Ack") {
438 peek(responseFromNB_in, ResponseMsg) {
439 enqueue(responseToCore_out, ResponseMsg, l2_response_latency) {
440 out_msg.addr := address;
441 out_msg.Type := CoherenceResponseType:TDSysResp;
442 out_msg.Destination.add(in_msg.WTRequestor);
443 out_msg.Sender := machineID;
444 out_msg.MessageSize := in_msg.MessageSize;
445 out_msg.DataBlk := in_msg.DataBlk;
// Notify the region buffer that this request is finished (DoneAck),
// reporting dirtiness from the TBE when one exists (guard lines elided).
449 action(sd2rb_sendDone2RegionBuffer, "sd2rb", desc="Request finished, send done ack") {
450 enqueue(unblockToNB_out, UnblockMsg, 1) {
451 out_msg.addr := address;
452 out_msg.Destination.add(getPeer(machineID));
453 out_msg.DoneAck := true;
454 out_msg.MessageSize := MessageSizeType:Unblock_Control;
456 out_msg.Dirty := tbe.Dirty;
458 out_msg.Dirty := false;
460 DPRINTF(RubySlicc, "%s\n", out_msg);
// Allocate a fresh entry for address with an all-clean byte mask.
464 action(a_allocateBlock, "a", desc="allocate TCC block") {
465 if (is_invalid(cache_entry)) {
466 set_cache_entry(L2cache.allocate(address, new Entry));
467 cache_entry.writeMask.clear();
// Allocate a TBE; for RdBlk/Atomic requests, record the requestor so the
// eventual data response can be fanned out.
471 action(t_allocateTBE, "t", desc="allocate TBE Entry") {
472 if (is_invalid(tbe)) {
473 check_allocate(TBEs);
474 TBEs.allocate(address);
475 set_tbe(TBEs.lookup(address));
476 tbe.Destination.clear();
479 if (coreRequestNetwork_in.isReady(clockEdge())) {
480 peek(coreRequestNetwork_in, CPURequestMsg) {
481 if(in_msg.Type == CoherenceRequestType:RdBlk || in_msg.Type == CoherenceRequestType:Atomic){
482 tbe.Destination.add(in_msg.Requestor);
// Release the TBE for this address.
488 action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
489 tbe.Destination.clear();
490 TBEs.deallocate(address);
// Install a full data block from the NB response into the cache.
494 action(wcb_writeCacheBlock, "wcb", desc="write data to TCC") {
495 peek(responseFromNB_in, ResponseMsg) {
496 cache_entry.DataBlk := in_msg.DataBlk;
497 DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
// Merge only the written bytes of a writethrough into the block and
// accumulate them into the entry's dirty byte mask.
501 action(wdb_writeDirtyBytes, "wdb", desc="write data to TCC") {
502 peek(coreRequestNetwork_in, CPURequestMsg) {
503 cache_entry.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask);
504 cache_entry.writeMask.orMask(in_msg.writeMask);
505 DPRINTF(RubySlicc, "Writing to TCC: %s\n", in_msg);
// Forward a core writethrough (data + byte mask) down to the NB,
// preserving the original requestor for the eventual ack.
509 action(wt_writeThrough, "wt", desc="write through data") {
510 peek(coreRequestNetwork_in, CPURequestMsg) {
511 enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
512 out_msg.addr := address;
513 out_msg.Requestor := machineID;
514 out_msg.WTRequestor := in_msg.Requestor;
515 out_msg.Destination.add(getPeer(machineID));
516 out_msg.MessageSize := MessageSizeType:Data;
517 out_msg.Type := CoherenceRequestType:WriteThrough;
518 out_msg.Dirty := true;
519 out_msg.DataBlk := in_msg.DataBlk;
520 out_msg.writeMask.orMask(in_msg.writeMask);
// Write back the cached block's dirty bytes to the NB (TCC itself is the
// WT requestor here).
525 action(wb_writeBack, "wb", desc="write back data") {
526 enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
527 out_msg.addr := address;
528 out_msg.Requestor := machineID;
529 out_msg.WTRequestor := machineID;
530 out_msg.Destination.add(getPeer(machineID));
531 out_msg.MessageSize := MessageSizeType:Data;
532 out_msg.Type := CoherenceRequestType:WriteThrough;
533 out_msg.Dirty := true;
534 out_msg.DataBlk := cache_entry.DataBlk;
535 out_msg.writeMask.orMask(cache_entry.writeMask);
// Forward an atomic operation to the NB/directory; the result returns via
// ar_sendAtomicResponse. (desc fixed: it previously said "write back
// data", copy-pasted from wb_writeBack.)
539 action(at_atomicThrough, "at", desc="atomic through") {
540 peek(coreRequestNetwork_in, CPURequestMsg) {
541 enqueue(requestToNB_out, CPURequestMsg, l2_request_latency) {
542 out_msg.addr := address;
543 out_msg.Requestor := machineID;
544 out_msg.WTRequestor := in_msg.Requestor;
545 out_msg.Destination.add(getPeer(machineID));
546 out_msg.MessageSize := MessageSizeType:Data;
547 out_msg.Type := CoherenceRequestType:Atomic;
548 out_msg.Dirty := true;
549 out_msg.writeMask.orMask(in_msg.writeMask);
// Ack an invalidating probe with no data (Ntsl, clean, miss).
554 action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
555 enqueue(responseToNB_out, ResponseMsg, 1) {
556 out_msg.addr := address;
557 out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
558 out_msg.Sender := machineID;
559 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
560 out_msg.Dirty := false;
561 out_msg.Hit := false;
562 out_msg.Ntsl := true;
563 out_msg.State := CoherenceState:NA;
564 out_msg.MessageSize := MessageSizeType:Response_Control;
567 action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
568 L2cache.setMRU(address);
// Queue-management actions: pop each in_port after its event is handled.
571 action(p_popRequestQueue, "p", desc="pop request queue") {
572 coreRequestNetwork_in.dequeue(clockEdge());
575 action(pr_popResponseQueue, "pr", desc="pop response queue") {
576 responseFromNB_in.dequeue(clockEdge());
579 action(pp_popProbeQueue, "pp", desc="pop probe queue") {
580 probeNetwork_in.dequeue(clockEdge());
// Stall: recycle the head request to retry it after recycle_latency.
582 action(zz_recycleRequestQueue, "z", desc="stall"){
583 coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
// Track outstanding atomics in the TBE; when the count reaches zero a
// trigger message is queued to fire the AtomicDone event.
587 action(ina_incrementNumAtomics, "ina", desc="inc num atomics") {
588 tbe.numAtomics := tbe.numAtomics + 1;
592 action(dna_decrementNumAtomics, "dna", desc="dec num atomics") {
593 tbe.numAtomics := tbe.numAtomics - 1;
594 if (tbe.numAtomics==0) {
595 enqueue(triggerQueue_out, TriggerMsg, 1) {
596 out_msg.addr := address;
597 out_msg.Type := TriggerType:AtomicDone;
602 action(ptr_popTriggerQueue, "ptr", desc="pop Trigger") {
603 triggerQueue_in.dequeue(clockEdge());
609 // transitions from base
610 // Assumptions for ArrayRead/Write
611 // TBE checked before tags
612 // Data Read/Write requires Tag Read
// Busy/transient states stall incoming core requests by recycling them.
614 transition(WI, {RdBlk, WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
615 zz_recycleRequestQueue;
617 transition(A, {RdBlk, WrVicBlk, WrVicBlkBack}) {TagArrayRead} {
618 zz_recycleRequestQueue;
620 transition(IV, {WrVicBlk, Atomic, WrVicBlkBack}) {TagArrayRead} {
621 zz_recycleRequestQueue;
// Read hits serve data locally; W must drain its writeback first (-> WI).
623 transition({M, V}, RdBlk) {TagArrayRead, DataArrayRead} {
628 transition(W, RdBlk, WI) {TagArrayRead, DataArrayRead} {
// Read miss: go to IV and wait for data from the NB.
633 transition(I, RdBlk, IV) {TagArrayRead} {
639 transition(IV, RdBlk) {
// Atomics are forwarded downstream; state A counts outstanding atomics.
645 transition({V, I},Atomic, A) {TagArrayRead} {
649 ina_incrementNumAtomics;
653 transition(A, Atomic) {
655 ina_incrementNumAtomics;
659 transition({M, W}, Atomic, WI) {TagArrayRead} {
// Write-through on a clean miss: block stays in I ->
664 // Cache block stays in I state which implies
665 // this TCC is a write-no-allocate cache
666 transition(I, WrVicBlk) {TagArrayRead} {
671 transition(V, WrVicBlk) {TagArrayRead, DataArrayWrite} {
// Writeback-mode victim writes: absorb dirty bytes into the cache (-> M/W).
678 transition({V, M}, WrVicBlkBack, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
685 transition(W, WrVicBlkBack) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
692 transition(I, WrVicBlkBack, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
// Replacements: dirty lines (W/M) must write back first (-> WI); clean
// lines invalidate directly; busy states cannot be replaced yet.
700 transition({W, M}, L2_Repl, WI) {TagArrayRead, DataArrayRead} {
706 transition({I, V}, L2_Repl, I) {TagArrayRead, TagArrayWrite} {
710 transition({A, IV, WI}, L2_Repl) {
// Invalidating probes are acked without data in every state.
714 transition({I, V}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
715 pi_sendProbeResponseInv;
719 transition(M, PrbInv, W) {TagArrayRead, TagArrayWrite} {
720 pi_sendProbeResponseInv;
724 transition(W, PrbInv) {TagArrayRead} {
725 pi_sendProbeResponseInv;
729 transition({A, IV, WI}, PrbInv) {
730 pi_sendProbeResponseInv;
// Data fill completes a read miss (-> V) or feeds pending atomics.
734 transition(IV, Data, V) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
738 sdr_sendDataResponse;
739 sd2rb_sendDone2RegionBuffer;
744 transition(A, Data) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
746 ar_sendAtomicResponse;
747 sd2rb_sendDone2RegionBuffer;
748 dna_decrementNumAtomics;
// Atomic drain: leave A once the trigger reports a zero count.
752 transition(A, AtomicDone, I) {TagArrayRead, TagArrayWrite} {
757 transition(A, AtomicNotDone) {TagArrayRead} {
761 //M,W should not see WBAck as the cache is in WB mode
762 //WBAcks do not need to check tags
763 transition({I, V, IV, A}, WBAck) {
765 sd2rb_sendDone2RegionBuffer;
769 transition(WI, WBAck,I) {
770 sd2rb_sendDone2RegionBuffer;