gpu-compute, mem-ruby, configs: Add GCN3 ISA support to GPU model
[gem5.git] src/mem/ruby/protocol/GPU_VIPER-TCP.sm
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Blake Hechtman
 */

machine(MachineType:TCP, "GPU TCP (L1 Data Cache)")
 : VIPERCoalescer* coalescer;
   Sequencer* sequencer;
   bool use_seq_not_coal;
   CacheMemory * L1cache;
   bool WB; /* is this cache writeback? */
   bool disableL1; /* bypass L1 cache? */
   int TCC_select_num_bits;
   Cycles issue_latency := 40;  // time to send data down to TCC
   Cycles l2_hit_latency := 18;

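   // Outgoing request, response, and unblock traffic uses virtual networks
   // 1, 3, and 5; probes and responses arrive from the network on vnets 1 and 3.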
   MessageBuffer * requestFromTCP, network="To", virtual_network="1", vnet_type="request";
   MessageBuffer * responseFromTCP, network="To", virtual_network="3", vnet_type="response";
   MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";

   MessageBuffer * probeToTCP, network="From", virtual_network="1", vnet_type="request";
   MessageBuffer * responseToTCP, network="From", virtual_network="3", vnet_type="response";
   MessageBuffer * mandatoryQueue;

{
  state_declaration(State, desc="TCP Cache States", default="TCP_State_I") {
    I, AccessPermission:Invalid, desc="Invalid";
    V, AccessPermission:Read_Only, desc="Valid";
    W, AccessPermission:Read_Write, desc="Written";
    M, AccessPermission:Read_Write, desc="Written and Valid";
    L, AccessPermission:Read_Write, desc="Local access is modifiable";
    A, AccessPermission:Invalid, desc="Waiting on Atomic";
  }

  enumeration(Event, desc="TCP Events") {
    // Core initiated
    Load, desc="Load";
    Store, desc="Store to L1 (L1 is dirty)";
    StoreThrough, desc="Store directly to L2 (L1 is clean)";
    StoreLocal, desc="Store to L1 but L1 is clean";
    Atomic, desc="Atomic";
    Flush, desc="Flush if dirty (wbL1 for Store Release)";
    Evict, desc="Evict if clean (invL1 for Load Acquire)";
    // Mem sys initiated
    Repl, desc="Replacing block from cache";

    // TCC initiated
    TCC_Ack, desc="TCC Ack to Core Request";
    TCC_AckWB, desc="TCC Ack for WB";
    // Disable L1 cache
    Bypass, desc="Bypass the entire L1 cache";
  }

  enumeration(RequestType,
              desc="To communicate stats from transitions to recordStats") {
    DataArrayRead, desc="Read the data array";
    DataArrayWrite, desc="Write the data array";
    TagArrayRead, desc="Read the tag array";
    TagArrayWrite, desc="Write the tag array";
    TagArrayFlash, desc="Flash clear the tag array";
  }


  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (diff than memory)?";
    DataBlock DataBlk, desc="data for the block";
    bool FromL2, default="false", desc="block just moved from L2";
    WriteMask writeMask, desc="written bytes masks";
  }

  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
    bool Shared, desc="Victim hit by shared probe";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<TCP_TBE>", constructor="m_number_of_TBEs";
  int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
  int WTcnt, default="0";
  int Fcnt, default="0";
  bool inFlush, default="false";
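  // Flush bookkeeping: WTcnt counts outstanding write-throughs, Fcnt counts
  // acknowledgements seen while a flush is in progress (inFlush); the two are
  // balanced in wd_wtDone and wb_wbDone before the coalescer is notified.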

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();

  // Internal functions
  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);
  Entry getCacheEntry(Addr address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
    return cache_entry;
  }

  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    } else {
      return getCacheEntry(addr).DataBlk;
    }
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
          testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes +
        functionalMemoryWrite(pkt);
    return num_functional_writes;
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs.lookup(addr);
    if (is_valid(tbe)) {
      return TCP_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return TCP_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  bool isValid(Addr addr) {
    AccessPermission perm := getAccessPermission(addr);
    if (perm == AccessPermission:NotPresent ||
        perm == AccessPermission:Invalid ||
        perm == AccessPermission:Busy) {
      return false;
    } else {
      return true;
    }
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(TCP_State_to_permission(state));
    }
  }

  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayFlash) {
      L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }

  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayFlash) {
      // FIXME should check once per cache, rather than once per cacheline
      return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType type in checkResourceAvailable");
      return true;
    }
  }

  // Out Ports

  out_port(requestNetwork_out, CPURequestMsg, requestFromTCP);

  // In Ports

  in_port(responseToTCP_in, ResponseMsg, responseToTCP) {
    if (responseToTCP_in.isReady(clockEdge())) {
      peek(responseToTCP_in, ResponseMsg, block_on="addr") {
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:TDSysResp) {
          // disable L1 cache
          if (disableL1) {
            trigger(Event:Bypass, in_msg.addr, cache_entry, tbe);
          } else {
            if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.addr)) {
              trigger(Event:TCC_Ack, in_msg.addr, cache_entry, tbe);
            } else {
              Addr victim := L1cache.cacheProbe(in_msg.addr);
              trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
            }
          }
        } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck ||
                   in_msg.Type == CoherenceResponseType:NBSysWBAck) {
          trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected Response Message to Core");
        }
      }
    }
  }

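  // Core requests arrive from the attached VIPERCoalescer (or Sequencer) on
  // the mandatory queue and are mapped onto Load, Store/StoreThrough, Atomic,
  // Flush, and Evict events.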
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        TBE tbe := TBEs.lookup(in_msg.LineAddress);
        DPRINTF(RubySlicc, "%s\n", in_msg);
        if (in_msg.Type == RubyRequestType:LD) {
          trigger(Event:Load, in_msg.LineAddress, cache_entry, tbe);
        } else if (in_msg.Type == RubyRequestType:ATOMIC) {
          trigger(Event:Atomic, in_msg.LineAddress, cache_entry, tbe);
        } else if (in_msg.Type == RubyRequestType:ST) {
          if (disableL1) {
            trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
          } else {
            if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
              if (WB) {
                trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
              } else {
                trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
              }
            } else {
              Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
              trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
            }
          } // end if (disableL1)
        } else if (in_msg.Type == RubyRequestType:FLUSH) {
          trigger(Event:Flush, in_msg.LineAddress, cache_entry, tbe);
        } else if (in_msg.Type == RubyRequestType:REPLACEMENT) {
          trigger(Event:Evict, in_msg.LineAddress, cache_entry, tbe);
        } else {
          error("Unexpected Request Message from VIC");
          // Unreachable: error() terminates the simulation, so the fallback
          // store handling below is never executed.
          if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
            if (WB) {
              trigger(Event:Store, in_msg.LineAddress, cache_entry, tbe);
            } else {
              trigger(Event:StoreThrough, in_msg.LineAddress, cache_entry, tbe);
            }
          } else {
            Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
            trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
          }
        }
      }
    }
  }

  // Actions

  action(ic_invCache, "ic", desc="invalidate cache") {
    if (is_valid(cache_entry)) {
      cache_entry.writeMask.clear();
      L1cache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(n_issueRdBlk, "n", desc="Issue RdBlk") {
    enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:RdBlk;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:TCC,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.InitialRequestTime := curCycle();
    }
  }

  action(rb_bypassDone, "rb", desc="bypass L1 of read access") {
    peek(responseToTCP_in, ResponseMsg) {
      DataBlock tmp := in_msg.DataBlk;
      if (use_seq_not_coal) {
        sequencer.readCallback(address, tmp, false, MachineType:L1Cache);
      } else {
        coalescer.readCallback(address, MachineType:L1Cache, tmp);
      }
      if (is_valid(cache_entry)) {
        unset_cache_entry();
      }
    }
  }

  action(wab_bypassDone, "wab", desc="bypass L1 of write access") {
    peek(responseToTCP_in, ResponseMsg) {
      DataBlock tmp := in_msg.DataBlk;
      if (use_seq_not_coal) {
        sequencer.writeCallback(address, tmp, false, MachineType:L1Cache);
      } else {
        coalescer.writeCallback(address, MachineType:L1Cache, tmp);
      }
    }
  }

  action(norl_issueRdBlkOrloadDone, "norl", desc="complete load locally or issue RdBlk") {
    peek(mandatoryQueue_in, RubyRequest) {
      if (cache_entry.writeMask.cmpMask(in_msg.writeMask)) {
        if (use_seq_not_coal) {
          sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
        } else {
          coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
        }
      } else {
        enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceRequestType:RdBlk;
          out_msg.Requestor := machineID;
          out_msg.Destination.add(mapAddressToRange(address, MachineType:TCC,
                                  TCC_select_low_bit, TCC_select_num_bits));
          out_msg.MessageSize := MessageSizeType:Request_Control;
          out_msg.InitialRequestTime := curCycle();
        }
      }
    }
  }

  action(wt_writeThrough, "wt", desc="Flush dirty data") {
    WTcnt := WTcnt + 1;
    APPEND_TRANSITION_COMMENT("write++ = ");
    APPEND_TRANSITION_COMMENT(WTcnt);
    enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      assert(is_valid(cache_entry));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.writeMask.clear();
      out_msg.writeMask.orMask(cache_entry.writeMask);
      out_msg.Destination.add(mapAddressToRange(address, MachineType:TCC,
                              TCC_select_low_bit, TCC_select_num_bits));
      out_msg.MessageSize := MessageSizeType:Data;
      out_msg.Type := CoherenceRequestType:WriteThrough;
      out_msg.InitialRequestTime := curCycle();
      out_msg.Shared := false;
    }
  }

  action(at_atomicThrough, "at", desc="send Atomic") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
        out_msg.addr := address;
        out_msg.Requestor := machineID;
        out_msg.writeMask.clear();
        out_msg.writeMask.orMask(in_msg.writeMask);
        out_msg.Destination.add(mapAddressToRange(address, MachineType:TCC,
                                TCC_select_low_bit, TCC_select_num_bits));
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Type := CoherenceRequestType:Atomic;
        out_msg.InitialRequestTime := curCycle();
        out_msg.Shared := false;
      }
    }
  }

  action(a_allocate, "a", desc="allocate block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1cache.allocate(address, new Entry));
    }
    cache_entry.writeMask.clear();
  }

  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    set_tbe(TBEs.lookup(address));
  }

  action(d_deallocateTBE, "d", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(sf_setFlush, "sf", desc="set flush") {
    inFlush := true;
    APPEND_TRANSITION_COMMENT(" inFlush is true");
  }

  action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
    responseToTCP_in.dequeue(clockEdge());
  }

  action(l_loadDone, "l", desc="local load done") {
    assert(is_valid(cache_entry));
    if (use_seq_not_coal) {
      sequencer.readCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
    } else {
      coalescer.readCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
    }
  }

  action(s_storeDone, "s", desc="local store done") {
    assert(is_valid(cache_entry));

    if (use_seq_not_coal) {
      sequencer.writeCallback(address, cache_entry.DataBlk, false, MachineType:L1Cache);
    } else {
      coalescer.writeCallback(address, MachineType:L1Cache, cache_entry.DataBlk);
    }
    cache_entry.Dirty := true;
  }

  action(inv_invDone, "inv", desc="local inv done") {
    if (use_seq_not_coal) {
      DPRINTF(RubySlicc, "Sequencer does not define invCallback!\n");
      assert(false);
    } else {
      coalescer.invCallback(address);
    }
  }

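  // Flush completion: the coalescer's wbCallback is only issued once the
  // number of flush acknowledgements (Fcnt) exceeds the number of still
  // outstanding write-throughs (WTcnt).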
  action(wb_wbDone, "wb", desc="local wb done") {
    if (inFlush == true) {
      Fcnt := Fcnt + 1;
      if (Fcnt > WTcnt) {
        if (use_seq_not_coal) {
          DPRINTF(RubySlicc, "Sequencer does not define wbCallback!\n");
          assert(false);
        } else {
          coalescer.wbCallback(address);
        }
        Fcnt := Fcnt - 1;
      }
      if (WTcnt == 0 && Fcnt == 0) {
        inFlush := false;
        APPEND_TRANSITION_COMMENT(" inFlush is false");
      }
    }
  }

  action(wd_wtDone, "wd", desc="writethrough done") {
    WTcnt := WTcnt - 1;
    if (inFlush == true) {
      Fcnt := Fcnt - 1;
    }
    assert(WTcnt >= 0);
    APPEND_TRANSITION_COMMENT("write-- = ");
    APPEND_TRANSITION_COMMENT(WTcnt);
  }

  action(dw_dirtyWrite, "dw", desc="update write mask") {
    peek(mandatoryQueue_in, RubyRequest) {
      cache_entry.DataBlk.copyPartial(in_msg.WTData, in_msg.writeMask);
      cache_entry.writeMask.orMask(in_msg.writeMask);
    }
  }
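
  // w_writeCache merges response data into the line while preserving bytes
  // the core has already written locally (those covered by writeMask).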
  action(w_writeCache, "w", desc="write data to cache") {
    peek(responseToTCP_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DataBlock tmp := in_msg.DataBlk;
      tmp.copyPartial(cache_entry.DataBlk, cache_entry.writeMask);
      cache_entry.DataBlk := tmp;
    }
  }

  action(mru_updateMRU, "mru", desc="Touch block for replacement policy") {
    L1cache.setMRU(address);
  }

  // action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
  //   mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  // }

  action(z_stall, "z", desc="stall; built-in") {
    // built-in action
  }

  // Transitions
  // ArrayRead/Write assumptions:
  //   All requests read the Tag Array
  //   TBE allocation writes the TagArray to I
  //   TBE is only checked on misses
  //   Stores will also write dirty bits in the tag
  //   WriteThroughs still need to use the cache entry as a staging buffer for the wavefront

  // Stalling transitions do NOT check the tag array... and if they do,
  // they can cause a resource stall deadlock!

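  // While an atomic is outstanding (state A), new core requests to the same
  // line must stall until the TCC responds.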
  transition({A}, {Load, Store, Atomic, StoreThrough}) { //TagArrayRead} {
    z_stall;
  }

  transition({M, V, L}, Load) {TagArrayRead, DataArrayRead} {
    l_loadDone;
    mru_updateMRU;
    p_popMandatoryQueue;
  }

  transition(I, Load) {TagArrayRead} {
    n_issueRdBlk;
    p_popMandatoryQueue;
  }

  transition({V, I}, Atomic, A) {TagArrayRead, TagArrayWrite} {
    t_allocateTBE;
    mru_updateMRU;
    at_atomicThrough;
    p_popMandatoryQueue;
  }

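  // Atomics on a dirty line: write the dirty data through first so the TCC
  // operates on up-to-date data, then invalidate the local copy.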
  transition({M, W}, Atomic, A) {TagArrayRead, TagArrayWrite} {
    wt_writeThrough;
    t_allocateTBE;
    at_atomicThrough;
    ic_invCache;
  }

  transition(W, Load, I) {TagArrayRead, DataArrayRead} {
    wt_writeThrough;
    norl_issueRdBlkOrloadDone;
    p_popMandatoryQueue;
  }

  transition({I}, StoreLocal, L) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocate;
    dw_dirtyWrite;
    s_storeDone;
    p_popMandatoryQueue;
  }

  transition({L, V}, StoreLocal, L) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    dw_dirtyWrite;
    mru_updateMRU;
    s_storeDone;
    p_popMandatoryQueue;
  }

  transition(I, Store, W) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocate;
    dw_dirtyWrite;
    s_storeDone;
    p_popMandatoryQueue;
  }

  transition(V, Store, M) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    dw_dirtyWrite;
    mru_updateMRU;
    s_storeDone;
    p_popMandatoryQueue;
  }

  transition({M, W}, Store) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    dw_dirtyWrite;
    mru_updateMRU;
    s_storeDone;
    p_popMandatoryQueue;
  }

  // M, W should not see StoreThrough
  transition(I, StoreThrough) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    a_allocate;
    dw_dirtyWrite;
    s_storeDone;
    wt_writeThrough;
    ic_invCache;
    p_popMandatoryQueue;
  }

  transition({V, L}, StoreThrough, I) {TagArrayRead, TagArrayWrite, DataArrayWrite} {
    dw_dirtyWrite;
    s_storeDone;
    wt_writeThrough;
    ic_invCache;
    p_popMandatoryQueue;
  }

  transition(I, TCC_Ack, V) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} {
    a_allocate;
    w_writeCache;
    l_loadDone;
    pr_popResponseQueue;
  }

  transition(I, Bypass, I) {
    rb_bypassDone;
    pr_popResponseQueue;
  }

  transition(A, Bypass, I) {
    d_deallocateTBE;
    wab_bypassDone;
    pr_popResponseQueue;
  }

  transition(A, TCC_Ack, I) {TagArrayRead, DataArrayRead, DataArrayWrite} {
    d_deallocateTBE;
    a_allocate;
    w_writeCache;
    s_storeDone;
    pr_popResponseQueue;
    ic_invCache;
  }

  transition(V, TCC_Ack, V) {TagArrayRead, DataArrayRead, DataArrayWrite} {
    w_writeCache;
    l_loadDone;
    pr_popResponseQueue;
  }

  transition({W, M}, TCC_Ack, M) {TagArrayRead, TagArrayWrite, DataArrayRead, DataArrayWrite} {
    w_writeCache;
    l_loadDone;
    pr_popResponseQueue;
  }

  transition({I, V}, Repl, I) {TagArrayRead, TagArrayWrite} {
    ic_invCache;
  }

  transition({A}, Repl) {TagArrayRead, TagArrayWrite} {
    ic_invCache;
  }

  transition({W, M}, Repl, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
    wt_writeThrough;
    ic_invCache;
  }

  transition(L, Repl, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
    wt_writeThrough;
    ic_invCache;
  }

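  // Flush: dirty lines (W, M) are written through and invalidated; the other
  // states only update the flush bookkeeping via wb_wbDone.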
  transition({W, M}, Flush, I) {TagArrayRead, TagArrayWrite, DataArrayRead} {
    sf_setFlush;
    wt_writeThrough;
    ic_invCache;
    p_popMandatoryQueue;
  }

  transition({V, I, A, L}, Flush) {TagArrayFlash} {
    sf_setFlush;
    wb_wbDone;
    p_popMandatoryQueue;
  }

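  // Evict (invL1 for load-acquire): clean copies are invalidated, while dirty
  // data in W/M is kept (as W) so it can still be written through later.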
  transition({I, V}, Evict, I) {TagArrayFlash} {
    inv_invDone;
    p_popMandatoryQueue;
    ic_invCache;
  }

  transition({W, M}, Evict, W) {TagArrayFlash} {
    inv_invDone;
    p_popMandatoryQueue;
  }

  transition({A, L}, Evict) {TagArrayFlash} {
    inv_invDone;
    p_popMandatoryQueue;
  }

  // TCC_AckWB only snoops TBE
  transition({V, I, A, M, W, L}, TCC_AckWB) {
    wd_wtDone;
    wb_wbDone;
    pr_popResponseQueue;
  }
}
745 }