mem-cache: Fix non-virtual base destructor of Repl Entry
[gem5.git] / src / mem / protocol / GPU_RfO-SQC.sm
1 /*
2 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * For use for simulation and test purposes only
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Authors: Lisa Hsu
34 */
35
// SLICC machine declaration for the GPU SQC (the shared L1 instruction
// cache serving GPU wavefronts) in the read-for-ownership (RfO) GPU
// coherence protocol.  The SQC never writes blocks, so its only stable
// valid state is Shared and all victims are clean (see vc_victim).
//
// Parameters:
//   sequencer          - Ruby sequencer that issues IFETCHes and receives
//                        readCallback completions.
//   L1cache            - backing CacheMemory for tags/data.
//   TCC_select_num_bits- address-interleave bits used with
//                        TCC_select_low_bit to pick the TCC/TCCdir bank.
//   issue_latency      - cycles to enqueue a message toward the TCC.
//   l2_hit_latency     - declared L2 hit latency (not referenced in the
//                        visible code of this file).
36 machine(MachineType:SQC, "GPU SQC (L1 I Cache)")
37 : Sequencer* sequencer;
38 CacheMemory * L1cache;
39 int TCC_select_num_bits;
40 Cycles issue_latency := 80; // time to send data down to TCC
41 Cycles l2_hit_latency := 18;
42
// Outbound virtual networks ("To"): requests on vnet 1, responses on
// vnet 3, unblocks on vnet 5.
43 MessageBuffer * requestFromSQC, network="To", virtual_network="1", vnet_type="request";
44 MessageBuffer * responseFromSQC, network="To", virtual_network="3", vnet_type="response";
45 MessageBuffer * unblockFromCore, network="To", virtual_network="5", vnet_type="unblock";
46
// Inbound virtual networks ("From"): probes on vnet 1, responses on vnet 3.
47 MessageBuffer * probeToSQC, network="From", virtual_network="1", vnet_type="request";
48 MessageBuffer * responseToSQC, network="From", virtual_network="3", vnet_type="response";
49
// Core-side queue carrying RubyRequests (IFETCH only; asserted below).
50 MessageBuffer * mandatoryQueue;
51 {
// Cache states.  Stable: I, S.  Transients cover the fetch miss (I_S)
// and the clean-writeback handshake on replacement (S_I, I_C).
52 state_declaration(State, desc="SQC Cache States", default="SQC_State_I") {
53 I, AccessPermission:Invalid, desc="Invalid";
54 S, AccessPermission:Read_Only, desc="Shared";
55
56 I_S, AccessPermission:Busy, desc="Invalid, issued RdBlkS, have not seen response yet";
// NOTE(review): S_I keeps Read_Only permission although the replacement
// path (transition S->S_I) deallocates the cache entry via ic_invCache;
// confirm no functional/timing read can land in S_I expecting data.
57 S_I, AccessPermission:Read_Only, desc="L1 replacement, waiting for clean WB ack";
58 I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from TCCdir for canceled WB";
59 }
60
// Events that drive the transition table below.
61 enumeration(Event, desc="SQC Events") {
62 // Core initiated
63 Fetch, desc="Fetch";
64
65 //TCC initiated
66 TCC_AckS, desc="TCC Ack to Core Request";
67 TCC_AckWB, desc="TCC Ack for WB";
68 TCC_NackWB, desc="TCC Nack for WB";
69
70 // Mem sys initiated
71 Repl, desc="Replacing block from cache";
72
73 // Probe Events
74 PrbInvData, desc="probe, return M data";
75 PrbInv, desc="probe, no need for data";
76 PrbShrData, desc="probe downgrade, return data";
77 }
78
// Resource/stat categories reported by transitions to recordRequestType
// and checked by checkResourceAvailable.
79 enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
80 DataArrayRead, desc="Read the data array";
81 DataArrayWrite, desc="Write the data array";
// Fixed copy-paste defect: the two Tag* descriptions previously said
// "data array"; they refer to the tag array (cf. recordRequestType,
// which maps them to CacheRequestType:TagArrayRead/TagArrayWrite).
82 TagArrayRead, desc="Read the tag array";
83 TagArrayWrite, desc="Write the tag array";
84 }
85
86
// Per-block cache entry stored in L1cache.
87 structure(Entry, desc="...", interface="AbstractCacheEntry") {
88 State CacheState, desc="cache state";
89 bool Dirty, desc="Is the data dirty (diff than memory)?";
90 DataBlock DataBlk, desc="data for the block";
91 bool FromL2, default="false", desc="block just moved from L2";
92 }
93
// Transaction buffer entry; holds victim data/state across a writeback.
94 structure(TBE, desc="...") {
95 State TBEState, desc="Transient state";
96 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
97 bool Dirty, desc="Is the data dirty (different than memory)?";
98 int NumPendingMsgs, desc="Number of acks/data messages that this processor is waiting for";
99 bool Shared, desc="Victim hit by shared probe";
100 }
101
// Interface of the externally implemented (C++) TBE table.
102 structure(TBETable, external="yes") {
103 TBE lookup(Addr);
104 void allocate(Addr);
105 void deallocate(Addr);
106 bool isPresent(Addr);
107 }
108
109 TBETable TBEs, template="<SQC_TBE>", constructor="m_number_of_TBEs";
// Low bit of the TCC bank-select field; defaults to the block offset bits.
110 int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
111
// Prototypes for functions supplied by the generated/base machine code.
112 Tick clockEdge();
113 Tick cyclesToTicks(Cycles c);
114
115 void set_cache_entry(AbstractCacheEntry b);
116 void unset_cache_entry();
117 void set_tbe(TBE b);
118 void unset_tbe();
119 void wakeUpAllBuffers();
120 void wakeUpBuffers(Addr a);
121 Cycles curCycle();
122
123 // Internal functions
// Look up the block's Entry in the L1; returns an invalid pointer on miss.
124 Entry getCacheEntry(Addr address), return_by_pointer="yes" {
125 Entry cache_entry := static_cast(Entry, "pointer", L1cache.lookup(address));
126 return cache_entry;
127 }
128
// Return the block's data, preferring in-flight TBE data over the cache
// copy.  NOTE(review): if neither a TBE nor a cache entry exists this
// dereferences an invalid entry; callers (pd/pdm actions) assert validity
// first — confirm no other caller reaches here unguarded.
129 DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
130 TBE tbe := TBEs.lookup(addr);
131 if(is_valid(tbe)) {
132 return tbe.DataBlk;
133 } else {
134 return getCacheEntry(addr).DataBlk;
135 }
136 }
137
// Standard SLICC state accessor: TBE state wins, then cache state, else I.
138 State getState(TBE tbe, Entry cache_entry, Addr addr) {
139 if(is_valid(tbe)) {
140 return tbe.TBEState;
141 } else if (is_valid(cache_entry)) {
142 return cache_entry.CacheState;
143 }
144 return State:I;
145 }
146
// Mirror the new state into whichever of TBE/cache entry exists.
147 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
148 if (is_valid(tbe)) {
149 tbe.TBEState := state;
150 }
151
152 if (is_valid(cache_entry)) {
153 cache_entry.CacheState := state;
154 }
155 }
156
// Map the current (transient-aware) state to a Ruby access permission.
157 AccessPermission getAccessPermission(Addr addr) {
158 TBE tbe := TBEs.lookup(addr);
159 if(is_valid(tbe)) {
160 return SQC_State_to_permission(tbe.TBEState);
161 }
162
163 Entry cache_entry := getCacheEntry(addr);
164 if(is_valid(cache_entry)) {
165 return SQC_State_to_permission(cache_entry.CacheState);
166 }
167
168 return AccessPermission:NotPresent;
169 }
170
171 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
172 if (is_valid(cache_entry)) {
173 cache_entry.changePermission(SQC_State_to_permission(state));
174 }
175 }
176
// Functional (debug/backdoor) read: serve from the TBE if one is in
// flight, otherwise fall through to memory.  NOTE(review): a valid clean
// cache entry is never consulted here — acceptable only because SQC
// blocks are never dirty relative to memory; confirm.
177 void functionalRead(Addr addr, Packet *pkt) {
178 TBE tbe := TBEs.lookup(addr);
179 if(is_valid(tbe)) {
180 testAndRead(addr, tbe.DataBlk, pkt);
181 } else {
182 functionalMemoryRead(pkt);
183 }
184 }
185
// Functional write: patch any in-flight TBE copy and memory; returns the
// number of locations updated.
186 int functionalWrite(Addr addr, Packet *pkt) {
187 int num_functional_writes := 0;
188
189 TBE tbe := TBEs.lookup(addr);
190 if(is_valid(tbe)) {
191 num_functional_writes := num_functional_writes +
192 testAndWrite(addr, tbe.DataBlk, pkt);
193 }
194
195 num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
196 return num_functional_writes;
197 }
198
// Forward transition resource usage to the cache's stats machinery.
199 void recordRequestType(RequestType request_type, Addr addr) {
200 if (request_type == RequestType:DataArrayRead) {
201 L1cache.recordRequestType(CacheRequestType:DataArrayRead, addr);
202 } else if (request_type == RequestType:DataArrayWrite) {
203 L1cache.recordRequestType(CacheRequestType:DataArrayWrite, addr);
204 } else if (request_type == RequestType:TagArrayRead) {
205 L1cache.recordRequestType(CacheRequestType:TagArrayRead, addr);
206 } else if (request_type == RequestType:TagArrayWrite) {
207 L1cache.recordRequestType(CacheRequestType:TagArrayWrite, addr);
208 }
209 }
210
// Bank/port availability check for the resources a transition declares.
211 bool checkResourceAvailable(RequestType request_type, Addr addr) {
212 if (request_type == RequestType:DataArrayRead) {
213 return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
214 } else if (request_type == RequestType:DataArrayWrite) {
215 return L1cache.checkResourceAvailable(CacheResourceType:DataArray, addr);
216 } else if (request_type == RequestType:TagArrayRead) {
217 return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
218 } else if (request_type == RequestType:TagArrayWrite) {
219 return L1cache.checkResourceAvailable(CacheResourceType:TagArray, addr);
220 } else {
221 error("Invalid RequestType type in checkResourceAvailable");
222 return true;
223 }
224 }
225
226 // Out Ports
227
228 out_port(requestNetwork_out, CPURequestMsg, requestFromSQC);
229 out_port(responseNetwork_out, ResponseMsg, responseFromSQC);
230 out_port(unblockNetwork_out, UnblockMsg, unblockFromCore);
231
232 // In Ports
233
// Probes from the TCC directory.  PrbInv maps to PrbInvData/PrbInv
// depending on whether data is requested; PrbDowngrade must request data.
234 in_port(probeNetwork_in, TDProbeRequestMsg, probeToSQC) {
235 if (probeNetwork_in.isReady(clockEdge())) {
236 peek(probeNetwork_in, TDProbeRequestMsg, block_on="addr") {
237 Entry cache_entry := getCacheEntry(in_msg.addr);
238 TBE tbe := TBEs.lookup(in_msg.addr);
239
240 if (in_msg.Type == ProbeRequestType:PrbInv) {
241 if (in_msg.ReturnData) {
242 trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
243 } else {
244 trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
245 }
246 } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
247 assert(in_msg.ReturnData);
248 trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
249 }
250 }
251 }
252 }
253
// Responses from the TCC/TCCdir.  TDSysResp must carry Shared state (the
// SQC only issues RdBlkS); WBAck/WBNack complete the replacement flow.
254 in_port(responseToSQC_in, ResponseMsg, responseToSQC) {
255 if (responseToSQC_in.isReady(clockEdge())) {
256 peek(responseToSQC_in, ResponseMsg, block_on="addr") {
257
258 Entry cache_entry := getCacheEntry(in_msg.addr);
259 TBE tbe := TBEs.lookup(in_msg.addr);
260
261 if (in_msg.Type == CoherenceResponseType:TDSysResp) {
262 if (in_msg.State == CoherenceState:Shared) {
263 trigger(Event:TCC_AckS, in_msg.addr, cache_entry, tbe);
264 } else {
265 error("SQC should not receive TDSysResp other than CoherenceState:Shared");
266 }
267 } else if (in_msg.Type == CoherenceResponseType:TDSysWBAck) {
268 trigger(Event:TCC_AckWB, in_msg.addr, cache_entry, tbe);
269 } else if (in_msg.Type == CoherenceResponseType:TDSysWBNack) {
270 trigger(Event:TCC_NackWB, in_msg.addr, cache_entry, tbe);
271 } else {
272 error("Unexpected Response Message to Core");
273 }
274 }
275 }
276 }
277
// Core requests.  Only IFETCH is legal here; if the set is full the
// victim's replacement event is triggered instead of the fetch.
278 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
279 if (mandatoryQueue_in.isReady(clockEdge())) {
280 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
281 Entry cache_entry := getCacheEntry(in_msg.LineAddress);
282 TBE tbe := TBEs.lookup(in_msg.LineAddress);
283
284 assert(in_msg.Type == RubyRequestType:IFETCH);
285 if (is_valid(cache_entry) || L1cache.cacheAvail(in_msg.LineAddress)) {
286 trigger(Event:Fetch, in_msg.LineAddress, cache_entry, tbe);
287 } else {
// Set conflict: replace the victim first; the fetch stays queued.
288 Addr victim := L1cache.cacheProbe(in_msg.LineAddress);
289 trigger(Event:Repl, victim, getCacheEntry(victim), TBEs.lookup(victim));
290 }
291 }
292 }
293 }
294
295 // Actions
296
// Deallocate the block from the L1 (if present) and clear the entry ref.
297 action(ic_invCache, "ic", desc="invalidate cache") {
298 if(is_valid(cache_entry)) {
299 L1cache.deallocate(address);
300 }
301 unset_cache_entry();
302 }
303
// Issue a shared-read (RdBlkS) request to the responsible TCCdir bank.
304 action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
305 enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
306 out_msg.addr := address;
307 out_msg.Type := CoherenceRequestType:RdBlkS;
308 out_msg.Requestor := machineID;
309 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
310 TCC_select_low_bit, TCC_select_num_bits));
311 out_msg.MessageSize := MessageSizeType:Request_Control;
312 out_msg.InitialRequestTime := curCycle();
313 }
314 }
315
// Send a VicClean victimization notice for the replaced block to the
// TCCdir bank.  The SQC is an instruction cache, so victims are always
// clean; Shared is set when the block was in S so the directory can
// track remaining sharers.
316 action(vc_victim, "vc", desc="Victimize E/S Data") {
317 enqueue(requestNetwork_out, CPURequestMsg, issue_latency) {
318 out_msg.addr := address;
319 out_msg.Requestor := machineID;
320 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
321 TCC_select_low_bit, TCC_select_num_bits));
322 out_msg.MessageSize := MessageSizeType:Request_Control;
323 out_msg.Type := CoherenceRequestType:VicClean;
324 out_msg.InitialRequestTime := curCycle();
325 if (cache_entry.CacheState == State:S) {
326 out_msg.Shared := true;
327 } else {
328 out_msg.Shared := false;
329 }
// Fix: removed a second, redundant "out_msg.InitialRequestTime :=
// curCycle();" that followed here — the field is already set above and
// both assignments run in the same cycle.
331 }
332 }
333
// Allocate an L1 entry for the block if one is not already present.
334 action(a_allocate, "a", desc="allocate block") {
335 if (is_invalid(cache_entry)) {
336 set_cache_entry(L1cache.allocate(address, new Entry));
337 }
338 }
339
// Allocate a TBE and snapshot the victim's data/dirty bit for writeback.
340 action(t_allocateTBE, "t", desc="allocate TBE Entry") {
341 check_allocate(TBEs);
342 assert(is_valid(cache_entry));
343 TBEs.allocate(address);
344 set_tbe(TBEs.lookup(address));
345 tbe.DataBlk := cache_entry.DataBlk; // Data only used for WBs
346 tbe.Dirty := cache_entry.Dirty;
347 tbe.Shared := false;
348 }
349
350 action(d_deallocateTBE, "d", desc="Deallocate TBE") {
351 TBEs.deallocate(address);
352 unset_tbe();
353 }
354
// Queue-pop helpers; each dequeues the message the transition consumed.
355 action(p_popMandatoryQueue, "pm", desc="Pop Mandatory Queue") {
356 mandatoryQueue_in.dequeue(clockEdge());
357 }
358
359 action(pr_popResponseQueue, "pr", desc="Pop Response Queue") {
360 responseToSQC_in.dequeue(clockEdge());
361 }
362
363 action(pp_popProbeQueue, "pp", desc="pop probe queue") {
364 probeNetwork_in.dequeue(clockEdge());
365 }
366
// Complete a fetch that hit in the L1 (local hit path).
367 action(l_loadDone, "l", desc="local load done") {
368 assert(is_valid(cache_entry));
369 sequencer.readCallback(address, cache_entry.DataBlk,
370 false, MachineType:L1Cache);
371 APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
372 }
373
// Complete a fetch filled by a remote response; forwards the response's
// timing fields to the sequencer for latency accounting.
374 action(xl_loadDone, "xl", desc="remote load done") {
375 peek(responseToSQC_in, ResponseMsg) {
376 assert(is_valid(cache_entry));
377 sequencer.readCallback(address,
378 cache_entry.DataBlk,
379 false,
380 machineIDToMachineType(in_msg.Sender),
381 in_msg.InitialRequestTime,
382 in_msg.ForwardRequestTime,
383 in_msg.ProbeRequestStartTime);
384 APPEND_TRANSITION_COMMENT(cache_entry.DataBlk);
385 }
386 }
387
// Install the data/dirty bit from the response into the cache entry.
388 action(w_writeCache, "w", desc="write data to cache") {
389 peek(responseToSQC_in, ResponseMsg) {
390 assert(is_valid(cache_entry));
391 cache_entry.DataBlk := in_msg.DataBlk;
392 cache_entry.Dirty := in_msg.Dirty;
393 }
394 }
395
// Tell the TCC the canceled writeback's data is stale (no data sent).
396 action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
397 peek(responseToSQC_in, ResponseMsg) {
398 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
399 out_msg.addr := address;
400 out_msg.Type := CoherenceResponseType:StaleNotif;
401 out_msg.Sender := machineID;
402 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
403 TCC_select_low_bit, TCC_select_num_bits));
404 out_msg.MessageSize := MessageSizeType:Response_Control;
405 DPRINTF(RubySlicc, "%s\n", out_msg);
406 }
407 }
408 }
409
// Send the victim's data (held in the TBE) to the TCC as the writeback
// body, tagging whether a shared probe hit the victim in flight.
410 action(wb_data, "wb", desc="write back data") {
411 peek(responseToSQC_in, ResponseMsg) {
412 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
413 out_msg.addr := address;
414 out_msg.Type := CoherenceResponseType:CPUData;
415 out_msg.Sender := machineID;
416 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
417 TCC_select_low_bit, TCC_select_num_bits));
418 out_msg.DataBlk := tbe.DataBlk;
419 out_msg.Dirty := tbe.Dirty;
420 if (tbe.Shared) {
421 out_msg.NbReqShared := true;
422 } else {
423 out_msg.NbReqShared := false;
424 }
425 out_msg.State := CoherenceState:Shared; // faux info
426 out_msg.MessageSize := MessageSizeType:Writeback_Data;
427 DPRINTF(RubySlicc, "%s\n", out_msg);
428 }
429 }
430 }
431
// Probe ack without data: block not present (or being invalidated).
// Ntsl ("not there since last") is set; Hit is cleared.
432 action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
433 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
434 out_msg.addr := address;
435 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
436 out_msg.Sender := machineID;
437 // will this always be ok? probably not for multisocket
438 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
439 TCC_select_low_bit, TCC_select_num_bits));
440 out_msg.Dirty := false;
441 out_msg.Hit := false;
442 out_msg.Ntsl := true;
443 out_msg.State := CoherenceState:NA;
444 out_msg.MessageSize := MessageSizeType:Response_Control;
445 }
446 }
447
// Field-for-field identical to pi_sendProbeResponseInv (only the
// Ntsl/Hit assignment order differs).  NOTE(review): no transition in
// this file references pim — it appears to be dead code kept for
// symmetry with the sibling CP/TCP machines; confirm before removing.
448 action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
449 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
450 out_msg.addr := address;
451 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
452 out_msg.Sender := machineID;
453 // will this always be ok? probably not for multisocket
454 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
455 TCC_select_low_bit, TCC_select_num_bits));
456 out_msg.Dirty := false;
457 out_msg.Ntsl := true;
458 out_msg.Hit := false;
459 out_msg.State := CoherenceState:NA;
460 out_msg.MessageSize := MessageSizeType:Response_Control;
461 }
462 }
463
// Probe miss ack for a shared-data probe: no data, Hit and Ntsl clear.
464 action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
465 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
466 out_msg.addr := address;
467 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
468 out_msg.Sender := machineID;
469 // will this always be ok? probably not for multisocket
470 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
471 TCC_select_low_bit, TCC_select_num_bits));
472 out_msg.Dirty := false; // only true if sending back data i think
473 out_msg.Hit := false;
474 out_msg.Ntsl := false;
475 out_msg.State := CoherenceState:NA;
476 out_msg.MessageSize := MessageSizeType:Response_Control;
477 }
478 }
479
// Probe ack carrying data; requires either a live cache entry or a TBE
// so getDataBlock and the Dirty lookup below are valid.
480 action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
481 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
482 assert(is_valid(cache_entry) || is_valid(tbe));
483 out_msg.addr := address;
484 out_msg.Type := CoherenceResponseType:CPUPrbResp;
485 out_msg.Sender := machineID;
486 // will this always be ok? probably not for multisocket
487 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
488 TCC_select_low_bit, TCC_select_num_bits));
489 out_msg.DataBlk := getDataBlock(address);
490 if (is_valid(tbe)) {
491 out_msg.Dirty := tbe.Dirty;
492 } else {
493 out_msg.Dirty := cache_entry.Dirty;
494 }
495 out_msg.Hit := true;
496 out_msg.State := CoherenceState:NA;
497 out_msg.MessageSize := MessageSizeType:Response_Data;
498 }
499 }
500
// Probe ack with data (M/S variant).  NOTE(review): no transition in
// this file references pdm — apparently kept for symmetry with the
// sibling TCP machine; confirm before removing.
501 action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
502 enqueue(responseNetwork_out, ResponseMsg, issue_latency) {
// Fix: dropped the former first assert "is_valid(cache_entry) ||
// is_valid(tbe)" — it was strictly weaker than (and thus subsumed by)
// the cache-entry assert that follows.
504 assert(is_valid(cache_entry));
505 out_msg.addr := address;
506 out_msg.Type := CoherenceResponseType:CPUPrbResp;
507 out_msg.Sender := machineID;
508 // will this always be ok? probably not for multisocket
509 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
510 TCC_select_low_bit, TCC_select_num_bits));
511 out_msg.DataBlk := getDataBlock(address);
// In-flight TBE data/dirty wins over the (asserted-valid) cache entry.
512 if (is_valid(tbe)) {
513 out_msg.Dirty := tbe.Dirty;
514 } else {
515 out_msg.Dirty := cache_entry.Dirty;
516 }
517 out_msg.Hit := true;
518 out_msg.State := CoherenceState:NA;
519 out_msg.MessageSize := MessageSizeType:Response_Data;
520 }
521 }
522
// Record that the writeback victim was hit by a shared probe while in
// flight; wb_data later reports this via NbReqShared.
523 action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
524 assert(is_valid(tbe));
525 tbe.Shared := true;
526 }
527
// Unblock the TCCdir after a state change completes.
528 action(uu_sendUnblock, "uu", desc="state changed, unblock") {
529 enqueue(unblockNetwork_out, UnblockMsg, issue_latency) {
530 out_msg.addr := address;
531 out_msg.Sender := machineID;
532 out_msg.Destination.add(mapAddressToRange(address,MachineType:TCCdir,
533 TCC_select_low_bit, TCC_select_num_bits));
534 out_msg.MessageSize := MessageSizeType:Unblock_Control;
535 DPRINTF(RubySlicc, "%s\n", out_msg);
536 }
537 }
538
// Recycle (defer) the head message so the transition retries later.
// NOTE(review): yy_recycleProbeQueue is not used by any transition in
// this file.
539 action(yy_recycleProbeQueue, "yy", desc="recycle probe queue") {
540 probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
541 }
542
543 action(zz_recycleMandatoryQueue, "\z", desc="recycle mandatory queue") {
544 mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
545 }
546
547 // Transitions
548
549 // transitions from base
// Miss: allocate, issue RdBlkS, wait in I_S for the TCC response.
550 transition(I, Fetch, I_S) {TagArrayRead, TagArrayWrite} {
551 a_allocate;
552 nS_issueRdBlkS;
553 p_popMandatoryQueue;
554 }
555
556 // simple hit transitions
557 transition(S, Fetch) {TagArrayRead, DataArrayRead} {
558 l_loadDone;
559 p_popMandatoryQueue;
560 }
561
562 // recycles from transients
// Stall core requests while a transient is pending on the block.
563 transition({I_S, S_I, I_C}, {Fetch, Repl}) {} {
564 zz_recycleMandatoryQueue;
565 }
566
// Clean replacement: snapshot into a TBE, send VicClean, drop the entry.
567 transition(S, Repl, S_I) {TagArrayRead} {
568 t_allocateTBE;
569 vc_victim;
570 ic_invCache;
571 }
572
573 // TCC event
// Fill: write data, complete the fetch, unblock the directory.
574 transition(I_S, TCC_AckS, S) {DataArrayRead, DataArrayWrite} {
575 w_writeCache;
576 xl_loadDone;
577 uu_sendUnblock;
578 pr_popResponseQueue;
579 }
580
581 transition(S_I, TCC_NackWB, I){TagArrayWrite} {
582 d_deallocateTBE;
583 pr_popResponseQueue;
584 }
585
586 transition(S_I, TCC_AckWB, I) {TagArrayWrite} {
587 wb_data;
588 d_deallocateTBE;
589 pr_popResponseQueue;
590 }
591
// Canceled writeback: the data is stale, so only notify the TCC.
592 transition(I_C, TCC_AckWB, I){TagArrayWrite} {
593 ss_sendStaleNotification;
594 d_deallocateTBE;
595 pr_popResponseQueue;
596 }
597
598 transition(I_C, TCC_NackWB, I) {TagArrayWrite} {
599 d_deallocateTBE;
600 pr_popResponseQueue;
601 }
602
603 // Probe transitions
// NOTE(review): in state I there is neither a valid cache entry nor a
// TBE, so pd_sendProbeResponseData's validity assert would fire here;
// confirm the directory never sends PrbInvData for a block this SQC
// holds in I (contrast the {I, I_C} PrbShrData transition below, which
// answers with a miss response instead).
604 transition({S, I}, PrbInvData, I) {TagArrayRead, TagArrayWrite} {
605 pd_sendProbeResponseData;
606 ic_invCache;
607 pp_popProbeQueue;
608 }
609
610 transition(I_C, PrbInvData, I_C) {
611 pi_sendProbeResponseInv;
612 ic_invCache;
613 pp_popProbeQueue;
614 }
615
616 transition({S, I}, PrbInv, I) {TagArrayRead, TagArrayWrite} {
617 pi_sendProbeResponseInv;
618 ic_invCache;
619 pp_popProbeQueue;
620 }
621
// Shared probe on a shared block: return data, stay in S.
622 transition({S}, PrbShrData, S) {DataArrayRead} {
623 pd_sendProbeResponseData;
624 pp_popProbeQueue;
625 }
626
627 transition({I, I_C}, PrbShrData) {TagArrayRead} {
628 prm_sendProbeResponseMiss;
629 pp_popProbeQueue;
630 }
631
632 transition(I_C, PrbInv, I_C){
633 pi_sendProbeResponseInv;
634 ic_invCache;
635 pp_popProbeQueue;
636 }
637
// Invalidating probe racing a pending fill: ack without data, then
// re-allocate so the in-flight response still has a slot to land in.
638 transition(I_S, {PrbInv, PrbInvData}) {} {
639 pi_sendProbeResponseInv;
640 ic_invCache;
641 a_allocate; // but make sure there is room for incoming data when it arrives
642 pp_popProbeQueue;
643 }
644
645 transition(I_S, PrbShrData) {} {
646 prm_sendProbeResponseMiss;
647 pp_popProbeQueue;
648 }
649
// Probe racing an outstanding writeback: ack, move to I_C to await the
// (now stale) WBAck/WBNack from the TCCdir.
650 transition(S_I, PrbInvData, I_C) {TagArrayWrite} {
651 pi_sendProbeResponseInv;
652 ic_invCache;
653 pp_popProbeQueue;
654 }
655
656 transition(S_I, PrbInv, I_C) {TagArrayWrite} {
657 pi_sendProbeResponseInv;
658 ic_invCache;
659 pp_popProbeQueue;
660 }
661
// Shared probe hits the writeback victim: serve data from the TBE and
// remember it via sf_setSharedFlip for the eventual wb_data.
662 transition(S_I, PrbShrData) {DataArrayRead} {
663 pd_sendProbeResponseData;
664 sf_setSharedFlip;
665 pp_popProbeQueue;
666 }
667 }