gpu-compute: AMD's baseline GPU model
[gem5.git] / src / mem / protocol / GPU_RfO-TCCdir.sm
1 /*
2 * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * For use for simulation and test purposes only
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the copyright holder nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Author: Mithuna Thottethodi
34 */
35
machine(MachineType:TCCdir, "AMD read-for-ownership directory for TCC (aka GPU L2)")
  : CacheMemory * directory;   // cache of directory entries tracking sharers/owner per block
  // Convention: wire buffers are prefixed with "w_" for clarity
  WireBuffer * w_reqToTCCDir;         // requests arriving from the TCC
  WireBuffer * w_respToTCCDir;        // probe responses arriving from the TCC
  WireBuffer * w_TCCUnblockToTCCDir;  // unblocks arriving from the TCC
  WireBuffer * w_reqToTCC;            // requests forwarded to the TCC
  WireBuffer * w_probeToTCC;          // probes sent to the TCC
  WireBuffer * w_respToTCC;           // responses sent to the TCC
  int TCC_select_num_bits;       // number of address bits selecting the TCC bank (paired with TCC_select_low_bit)
  Cycles response_latency := 5;  // enqueue latency for probes/responses issued by this controller
  Cycles directory_latency := 6; // configured directory access latency
  Cycles issue_latency := 120;   // enqueue latency for requests/victims issued down to the NB
 
  // From the TCPs or SQCs
  MessageBuffer * requestFromTCP, network="From", virtual_network="1", vnet_type="request";
  MessageBuffer * responseFromTCP, network="From", virtual_network="3", vnet_type="response";
  MessageBuffer * unblockFromTCP, network="From", virtual_network="5", vnet_type="unblock";
 
  // To the Cores. TCC deals only with TCPs/SQCs. CP cores do not communicate directly with TCC.
  MessageBuffer * probeToCore, network="To", virtual_network="1", vnet_type="request";
  MessageBuffer * responseToCore, network="To", virtual_network="3", vnet_type="response";
 
  // From the NB
  MessageBuffer * probeFromNB, network="From", virtual_network="0", vnet_type="request";
  MessageBuffer * responseFromNB, network="From", virtual_network="2", vnet_type="response";
  // To the NB
  MessageBuffer * requestToNB, network="To", virtual_network="0", vnet_type="request";
  MessageBuffer * responseToNB, network="To", virtual_network="2", vnet_type="response";
  MessageBuffer * unblockToNB, network="To", virtual_network="4", vnet_type="unblock";
 
  MessageBuffer * triggerQueue, random="false";  // local self-scheduled events (e.g. AcksComplete)
{
69 // STATES
70 state_declaration(State, desc="Directory states", default="TCCdir_State_I") {
71 // Base states
72 I, AccessPermission:Invalid, desc="Invalid";
73 S, AccessPermission:Invalid, desc="Shared";
74 E, AccessPermission:Invalid, desc="Shared";
75 O, AccessPermission:Invalid, desc="Owner";
76 M, AccessPermission:Invalid, desc="Modified";
77
78 CP_I, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to invalid";
79 B_I, AccessPermission:Invalid, desc="Blocked, need not send data after acks are in, going to invalid";
80 CP_O, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to owned";
81 CP_S, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to shared";
82 CP_OM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to O_M";
83 CP_SM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to S_M";
84 CP_ISM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to I_M";
85 CP_IOM, AccessPermission:Invalid, desc="Blocked, must send data after acks are in, going to I_M";
86 CP_OSIW, AccessPermission:Invalid, desc="Blocked, must send data after acks+CancelWB are in, going to I_C";
87
88
89 // Transient states and busy states used for handling side (TCC-facing) interactions
90 BW_S, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
91 BW_E, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
92 BW_O, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
93 BW_M, AccessPermission:Invalid, desc="Blocked, Awaiting TCC unblock";
94
95 // Transient states and busy states used for handling upward (TCP-facing) interactions
96 I_M, AccessPermission:Invalid, desc="Invalid, issued RdBlkM, have not seen response yet";
97 I_ES, AccessPermission:Invalid, desc="Invalid, issued RdBlk, have not seen response yet";
98 I_S, AccessPermission:Invalid, desc="Invalid, issued RdBlkS, have not seen response yet";
99 BBS_S, AccessPermission:Invalid, desc="Blocked, going from S to S";
100 BBO_O, AccessPermission:Invalid, desc="Blocked, going from O to O";
101 BBM_M, AccessPermission:Invalid, desc="Blocked, going from M to M, waiting for data to forward";
102 BBM_O, AccessPermission:Invalid, desc="Blocked, going from M to O, waiting for data to forward";
103 BB_M, AccessPermission:Invalid, desc="Blocked, going from M to M, waiting for unblock";
104 BB_O, AccessPermission:Invalid, desc="Blocked, going from M to O, waiting for unblock";
105 BB_OO, AccessPermission:Invalid, desc="Blocked, going from O to O (adding sharers), waiting for unblock";
106 BB_S, AccessPermission:Invalid, desc="Blocked, going to S, waiting for (possible multiple) unblock(s)";
107 BBS_M, AccessPermission:Invalid, desc="Blocked, going from S or O to M";
108 BBO_M, AccessPermission:Invalid, desc="Blocked, going from S or O to M";
109 BBS_UM, AccessPermission:Invalid, desc="Blocked, going from S or O to M via upgrade";
110 BBO_UM, AccessPermission:Invalid, desc="Blocked, going from S or O to M via upgrade";
111 S_M, AccessPermission:Invalid, desc="Shared, issued CtoD, have not seen response yet";
112 O_M, AccessPermission:Invalid, desc="Shared, issued CtoD, have not seen response yet";
113
114 //
115 BBB_S, AccessPermission:Invalid, desc="Blocked, going to S after core unblock";
116 BBB_M, AccessPermission:Invalid, desc="Blocked, going to M after core unblock";
117 BBB_E, AccessPermission:Invalid, desc="Blocked, going to E after core unblock";
118
119 VES_I, AccessPermission:Invalid, desc="TCC replacement, waiting for clean WB ack";
120 VM_I, AccessPermission:Invalid, desc="TCC replacement, waiting for dirty WB ack";
121 VO_I, AccessPermission:Invalid, desc="TCC replacement, waiting for dirty WB ack";
122 VO_S, AccessPermission:Invalid, desc="TCC owner replacement, waiting for dirty WB ack";
123
124 ES_I, AccessPermission:Invalid, desc="L1 replacement, waiting for clean WB ack";
125 MO_I, AccessPermission:Invalid, desc="L1 replacement, waiting for dirty WB ack";
126
127 I_C, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from NB for canceled WB";
128 I_W, AccessPermission:Invalid, desc="Invalid, waiting for WBAck from NB; canceled WB raced with directory invalidation";
129
130 // Recall States
131 BRWD_I, AccessPermission:Invalid, desc="Recalling, waiting for WBAck and Probe Data responses";
132 BRW_I, AccessPermission:Read_Write, desc="Recalling, waiting for WBAck";
133 BRD_I, AccessPermission:Invalid, desc="Recalling, waiting for Probe Data responses";
134
135 }
136
137 enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
138 DataArrayRead, desc="Read the data array";
139 DataArrayWrite, desc="Write the data array";
140 TagArrayRead, desc="Read the data array";
141 TagArrayWrite, desc="Write the data array";
142 }
143
144
145
146 // EVENTS
147 enumeration(Event, desc="TCC Directory Events") {
148 // Upward facing events (TCCdir w.r.t. TCP/SQC and TCC behaves like NBdir behaves with TCP/SQC and L3
149
150 // Directory Recall
151 Recall, desc="directory cache is full";
152 // CPU requests
153 CPUWrite, desc="Initial req from core, sent to TCC";
154 NoCPUWrite, desc="Initial req from core, but non-exclusive clean data; can be discarded";
155 CPUWriteCancel, desc="Initial req from core, sent to TCC";
156
157 // Requests from the TCPs
158 RdBlk, desc="RdBlk event";
159 RdBlkM, desc="RdBlkM event";
160 RdBlkS, desc="RdBlkS event";
161 CtoD, desc="Change to Dirty request";
162
163 // TCC writebacks
164 VicDirty, desc="...";
165 VicDirtyLast, desc="...";
166 VicClean, desc="...";
167 NoVic, desc="...";
168 StaleVic, desc="...";
169 CancelWB, desc="TCC got invalidating probe, canceled WB";
170
171 // Probe Responses from TCP/SQCs
172 CPUPrbResp, desc="Probe response from TCP/SQC";
173 TCCPrbResp, desc="Probe response from TCC";
174
175 ProbeAcksComplete, desc="All acks received";
176 ProbeAcksCompleteReissue, desc="All acks received, changing CtoD to reissue";
177
178 CoreUnblock, desc="unblock from TCP/SQC";
179 LastCoreUnblock, desc="Last unblock from TCP/SQC";
180 TCCUnblock, desc="unblock from TCC (current owner)";
181 TCCUnblock_Sharer, desc="unblock from TCC (a sharer, not owner)";
182 TCCUnblock_NotValid,desc="unblock from TCC (not valid...caused by stale writebacks)";
183
184 // Downward facing events
185
186 // NB initiated
187 NB_AckS, desc="NB Ack to TCC Request";
188 NB_AckE, desc="NB Ack to TCC Request";
189 NB_AckM, desc="NB Ack to TCC Request";
190 NB_AckCtoD, desc="NB Ack to TCC Request";
191 NB_AckWB, desc="NB Ack for clean WB";
192
193
194 // Incoming Probes from NB
195 PrbInvData, desc="Invalidating probe, return dirty data";
196 PrbInv, desc="Invalidating probe, no need to return data";
197 PrbShrData, desc="Downgrading probe, return data";
198 }
199
200
201 // TYPES
202
203 // Entry for directory
  // One directory entry: tracks which TCPs/SQCs/TCC hold the block and in
  // what role. Owner is a NetDest so it can be empty; setState() asserts it
  // never holds more than one member.
  structure(Entry, desc="...", interface='AbstractCacheEntry') {
    State CacheState,          desc="Cache state (Cache of directory entries)";
    DataBlock DataBlk,         desc="data for the block";
    NetDest Sharers,           desc="Sharers for this block";
    NetDest Owner,             desc="Owner of this block";
    NetDest MergedSharers,     desc="Read sharers who are merged on a request";
    int WaitingUnblocks,       desc="Number of acks we're waiting for";
  }
212
  // Per-transaction state held while a request is in flight.
  structure(TBE, desc="...") {
    State TBEState,     desc="Transient state";
    DataBlock DataBlk,  desc="DataBlk";
    bool Dirty,         desc="Is the data dirty?";
    MachineID Requestor, desc="requestor";
    int NumPendingAcks,        desc="num acks expected";
    MachineID OriginalRequestor, desc="Original Requestor";
    MachineID UntransferredOwner, desc = "Untransferred owner for an upgrade transaction";
    bool UntransferredOwnerExists, desc = "1 if Untransferred owner exists for an upgrade transaction";
    bool Cached,        desc="data hit in Cache";
    bool Shared,        desc="victim hit by shared probe";
    bool Upgrade,       desc="An upgrade request in progress";
    // The three "Saved sysack info" fields cache parts of the NB system-ack
    // so they can be replayed later -- presumably when the deferred response
    // is finally forwarded to the core; confirm against the transitions.
    bool CtoD,          desc="Saved sysack info";
    CoherenceState CohState, desc="Saved sysack info";
    MessageSizeType MessageSize, desc="Saved sysack info";
    MachineID Sender, desc="sender";
  }
230
  // External (C++-implemented) table of TBEs, keyed by block address.
  structure(TBETable, external = "yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }
237
238 // ** OBJECTS **
  // ** OBJECTS **
  TBETable TBEs, template="<TCCdir_TBE>", constructor="m_number_of_TBEs";
  int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";  // low bit of the TCC bank-select field (just above the block offset)
  NetDest TCC_dir_subtree;  // machine-level scratch NetDest
  NetDest temp;             // scratch NetDest used when assembling probe destination sets

  // Provided by the generated controller base class.
  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
251
252
  // True if 'addr' already has a directory entry or one can be allocated
  // without a recall.
  bool presentOrAvail(Addr addr) {
    return directory.isTagPresent(addr) || directory.cacheAvail(addr);
  }
256
  // Look up the directory entry for 'addr'; may return an invalid pointer
  // (callers check with is_valid()).
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", directory.lookup(addr));
  }
260
  // Return the data for 'addr'. Only the TBE copy is ever expected: the else
  // branch asserts false first, so the cache-entry fallback is effectively
  // unreachable and exists only to satisfy the return requirement.
  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      return tbe.DataBlk;
    } else {
      assert(false);
      return getCacheEntry(addr).DataBlk;
    }
  }
270
  // Current state for 'addr': TBE (transient) takes precedence over the
  // directory entry; absent both, the block is Invalid.
  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }
279
  // Propagate the state's access permission onto the cache entry (no-op when
  // there is no entry).
  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(TCCdir_State_to_permission(state));
    }
  }
285
  // Access permission for functional/debug accesses: TBE state wins over the
  // directory entry's state; NotPresent if neither exists.
  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      return TCCdir_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      return TCCdir_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }
299
  // Functional read: serve from the TBE if one is in flight, otherwise fall
  // through to memory. Note the directory entry's DataBlk is never consulted
  // here.
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      functionalMemoryRead(pkt);
    }
  }
308
  // Functional write: update the in-flight TBE copy (if any) and always
  // write through to memory; returns the number of successful writes.
  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs.lookup(addr);
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
            testAndWrite(addr, tbe.DataBlk, pkt);
    }

    num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
    return num_functional_writes;
  }
321
  // Record the new state in the TBE and/or directory entry, and check the
  // sharer/owner invariants for the stable states.
  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
        tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
        cache_entry.CacheState := state;

        if (state == State:S) {
          assert(cache_entry.Owner.count() == 0);  // S: sharers only, no owner
        }

        if (state == State:O) {
          assert(cache_entry.Owner.count() == 1);  // O: exactly one owner...
          assert(cache_entry.Sharers.isSuperset(cache_entry.Owner) == false);  // ...who is not also listed as a sharer
        }

        if (state == State:M) {
          assert(cache_entry.Owner.count() == 1);  // M: single owner, no sharers
          assert(cache_entry.Sharers.count() == 0);
        }

        if (state == State:E) {
          // E tracks the exclusive holder in Sharers (promoted to Owner later
          // by eto_moveExSharerToOwner).
          assert(cache_entry.Owner.count() == 0);
          assert(cache_entry.Sharers.count() == 1);
        }
    }
  }
350
351
352
  // Forward per-transition stat events to the directory cache's counters.
  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      directory.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      directory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      directory.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      directory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }
364
  // Map a RequestType onto the corresponding cache resource (data vs tag
  // array) and ask the directory whether it has bandwidth this cycle.
  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return directory.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return directory.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return directory.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return directory.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType type in checkResourceAvailable");
      return true;
    }
  }
379
  // ** OUT_PORTS **

  // Three classes of ports
  // Class 1: downward facing network links to NB
  out_port(requestToNB_out, CPURequestMsg, requestToNB);
  out_port(responseToNB_out, ResponseMsg, responseToNB);
  out_port(unblockToNB_out, UnblockMsg, unblockToNB);


  // Class 2: upward facing ports to GPU cores
  out_port(probeToCore_out, TDProbeRequestMsg, probeToCore);
  out_port(responseToCore_out, ResponseMsg, responseToCore);

  // Class 3: sideward facing ports (on "wirebuffer" links) to TCC
  out_port(w_requestTCC_out, CPURequestMsg, w_reqToTCC);
  out_port(w_probeTCC_out, NBProbeRequestMsg, w_probeToTCC);
  out_port(w_respTCC_out, ResponseMsg, w_respToTCC);


  // local trigger port (self-scheduled events, e.g. ProbeAcksComplete)
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
401
402 //
403 // request queue going to NB
404 //
405
406 // ** IN_PORTS **
407
408 // Trigger Queue
  // Trigger Queue: self-scheduled AcksComplete events. The TBE must exist;
  // tbe.Upgrade selects the plain completion vs. the CtoD-reissue path.
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=8) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        assert(is_valid(tbe));
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if ((in_msg.Type == TriggerType:AcksComplete) && (tbe.Upgrade == false))  {
          trigger(Event:ProbeAcksComplete, in_msg.addr, cache_entry, tbe);
        } else if ((in_msg.Type == TriggerType:AcksComplete) && (tbe.Upgrade == true))  {
          trigger(Event:ProbeAcksCompleteReissue, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
423
424 // Unblock Networks (TCCdir can receive unblocks from TCC, TCPs)
425 // Port on first (of three) wire buffers from TCC
  // Unblock Networks (TCCdir can receive unblocks from TCC, TCPs)
  // Port on first (of three) wire buffers from TCC. The unblock's flags
  // classify the TCC's role: current owner, mere sharer, or not holding a
  // valid copy (stale writeback race).
  in_port(w_TCCUnblock_in, UnblockMsg, w_TCCUnblockToTCCDir, rank=7) {
    if (w_TCCUnblock_in.isReady(clockEdge())) {
      peek(w_TCCUnblock_in, UnblockMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.currentOwner) {
          trigger(Event:TCCUnblock, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.valid) {
          trigger(Event:TCCUnblock_Sharer, in_msg.addr, cache_entry, tbe);
        } else {
          trigger(Event:TCCUnblock_NotValid, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
441
  // Unblocks from the TCP/SQC cores; distinguishes the final expected
  // unblock (WaitingUnblocks == 1) from intermediate ones.
  in_port(unblockNetwork_in, UnblockMsg, unblockFromTCP, rank=6) {
    if (unblockNetwork_in.isReady(clockEdge())) {
      peek(unblockNetwork_in, UnblockMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        // NOTE(review): cache_entry is dereferenced without an is_valid()
        // check -- relies on the entry never being deallocated while core
        // unblocks are outstanding; confirm.
        if(cache_entry.WaitingUnblocks == 1) {
          trigger(Event:LastCoreUnblock, in_msg.addr, cache_entry, tbe);
        }
        else {
          trigger(Event:CoreUnblock, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
456
457
458 //Responses from TCC, and Cores
459 // Port on second (of three) wire buffers from TCC
  //Responses from TCC, and Cores
  // Port on second (of three) wire buffers from TCC: probe responses only.
  in_port(w_TCCResponse_in, ResponseMsg, w_respToTCCDir, rank=5) {
    if (w_TCCResponse_in.isReady(clockEdge())) {
      peek(w_TCCResponse_in, ResponseMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
          trigger(Event:TCCPrbResp, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
471
  // Probe responses from the TCP/SQC cores.
  in_port(responseNetwork_in, ResponseMsg, responseFromTCP, rank=4) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
          trigger(Event:CPUPrbResp, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
483
484
485 // Port on third (of three) wire buffers from TCC
  // Port on third (of three) wire buffers from TCC: victimizations and WB
  // cancels. Classifies TCC victims into VicDirtyLast / VicDirty / VicClean /
  // NoVic / StaleVic based on the directory's sharer/owner bookkeeping.
  in_port(w_TCCRequest_in, CPURequestMsg, w_reqToTCCDir, rank=3) {
    if(w_TCCRequest_in.isReady(clockEdge())) {
      peek(w_TCCRequest_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:WrCancel) {
          trigger(Event:CancelWB, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:VicDirty) {
          if (is_valid(cache_entry) && cache_entry.Owner.isElement(in_msg.Requestor)) {
            // if modified, or owner with no other sharers
            if ((cache_entry.CacheState == State:M) || (cache_entry.Sharers.count() == 0)) {
              assert(cache_entry.Owner.count()==1);
              trigger(Event:VicDirtyLast, in_msg.addr, cache_entry, tbe);
            } else {
              trigger(Event:VicDirty, in_msg.addr, cache_entry, tbe);
            }
          } else {
            // TCC is not the recorded owner: stale victimization
            trigger(Event:StaleVic, in_msg.addr, cache_entry, tbe);
          }
        } else {
          if (in_msg.Type == CoherenceRequestType:VicClean) {
            if (is_valid(cache_entry) && cache_entry.Sharers.isElement(in_msg.Requestor)) {
              if (cache_entry.Sharers.count() == 1) {
                // Last copy, victimize to L3
                trigger(Event:VicClean, in_msg.addr, cache_entry, tbe);
              } else {
                // Either not the last copy or stall. No need to victimmize
                // remove sharer from sharer list
                assert(cache_entry.Sharers.count() > 1);
                trigger(Event:NoVic, in_msg.addr, cache_entry, tbe);
              }
            } else {
              trigger(Event:StaleVic, in_msg.addr, cache_entry, tbe);
            }
          }
        }
      }
    }
  }
525
  // Responses from the NB (system directory): sys-acks carrying the granted
  // coherence state, and writeback acks. block_on="addr" serializes further
  // messages for the same block while one is being handled.
  in_port(responseFromNB_in, ResponseMsg, responseFromNB, rank=2) {
    if (responseFromNB_in.isReady(clockEdge())) {
      peek(responseFromNB_in, ResponseMsg, block_on="addr") {

        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:NBSysResp) {
          if (in_msg.State == CoherenceState:Modified) {
            if (in_msg.CtoD) {
              trigger(Event:NB_AckCtoD, in_msg.addr, cache_entry, tbe);
            } else {
              trigger(Event:NB_AckM, in_msg.addr, cache_entry, tbe);
            }
          } else if (in_msg.State == CoherenceState:Shared) {
            trigger(Event:NB_AckS, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.State == CoherenceState:Exclusive) {
            trigger(Event:NB_AckE, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:NBSysWBAck) {
          trigger(Event:NB_AckWB, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unexpected Response Message to Core");
        }
      }
    }
  }
552
553 // Finally handling incoming requests (from TCP) and probes (from NB).
554
  // Finally handling incoming requests (from TCP) and probes (from NB).

  // Probes from the NB: invalidations (with or without data return) and
  // downgrades (which always return data).
  in_port(probeNetwork_in, NBProbeRequestMsg, probeFromNB, rank=1) {
    if (probeNetwork_in.isReady(clockEdge())) {
      peek(probeNetwork_in, NBProbeRequestMsg) {
        DPRINTF(RubySlicc, "%s\n", in_msg);
        DPRINTF(RubySlicc, "machineID: %s\n", machineID);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs.lookup(in_msg.addr);

        if (in_msg.Type == ProbeRequestType:PrbInv) {
          if (in_msg.ReturnData) {
            trigger(Event:PrbInvData, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:PrbInv, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
          assert(in_msg.ReturnData);  // downgrade probes always want data
          trigger(Event:PrbShrData, in_msg.addr, cache_entry, tbe);
        }
      }
    }
  }
576
577
  // Requests from the TCP/SQC cores (lowest rank: responses/probes drain
  // first). If no directory entry exists and none can be allocated, a Recall
  // is triggered on the victim address instead of the request's address.
  in_port(coreRequestNetwork_in, CPURequestMsg, requestFromTCP, rank=0) {
    if (coreRequestNetwork_in.isReady(clockEdge())) {
      peek(coreRequestNetwork_in, CPURequestMsg) {
        TBE tbe := TBEs.lookup(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (presentOrAvail(in_msg.addr)) {
          if (in_msg.Type == CoherenceRequestType:VicDirty) {
            trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:VicClean) {
            if (is_valid(cache_entry) && cache_entry.Owner.isElement(in_msg.Requestor)) {
              trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
            } else if(is_valid(cache_entry) && (cache_entry.Sharers.count() + cache_entry.Owner.count() ) >1) {
              // other copies remain: clean victim can simply be dropped
              trigger(Event:NoCPUWrite, in_msg.addr, cache_entry, tbe);
            } else {
              trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
            }
          } else if (in_msg.Type == CoherenceRequestType:RdBlk) {
            trigger(Event:RdBlk, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:RdBlkS) {
            trigger(Event:RdBlkS, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:RdBlkM) {
            trigger(Event:RdBlkM, in_msg.addr, cache_entry, tbe);
          } else if (in_msg.Type == CoherenceRequestType:WrCancel) {
            trigger(Event:CPUWriteCancel, in_msg.addr, cache_entry, tbe);
          }
        } else {
          // All requests require a directory entry
          Addr victim := directory.cacheProbe(in_msg.addr);
          trigger(Event:Recall, victim, getCacheEntry(victim), TBEs.lookup(victim));
        }
      }
    }
  }
611
612
613
614
615 // Actions
616
617 //Downward facing actions
618
  // Sharer/owner bookkeeping: each action removes one machine from the
  // entry's sets, peeking the port that identifies which machine.
  action(c_clearOwner, "c", desc="Clear the owner field") {
    cache_entry.Owner.clear();
  }

  action(rS_removeRequesterFromSharers, "rS", desc="Remove unblocker from sharer list") {
    peek(unblockNetwork_in, UnblockMsg) {
      cache_entry.Sharers.remove(in_msg.Sender);
    }
  }

  action(rT_removeTCCFromSharers, "rT", desc="Remove TCC from sharer list") {
    peek(w_TCCRequest_in, CPURequestMsg) {
      cache_entry.Sharers.remove(in_msg.Requestor);
    }
  }

  action(rO_removeOriginalRequestorFromSharers, "rO", desc="Remove replacing core from sharer list") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      cache_entry.Sharers.remove(in_msg.Requestor);
    }
  }

  // Same body as rO above; kept as a distinct action name for transition
  // readability.
  action(rC_removeCoreFromSharers, "rC", desc="Remove replacing core from sharer list") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      cache_entry.Sharers.remove(in_msg.Requestor);
    }
  }
646
647 action(rCo_removeCoreFromOwner, "rCo", desc="Remove replacing core from sharer list") {
648 // Note that under some cases this action will try to remove a stale owner
649 peek(coreRequestNetwork_in, CPURequestMsg) {
650 cache_entry.Owner.remove(in_msg.Requestor);
651 }
652 }
653
  // Drop a core that answered a probe (and thus invalidated its copy) from
  // the sharer list.
  action(rR_removeResponderFromSharers, "rR", desc="Remove responder from sharer list") {
    peek(responseNetwork_in, ResponseMsg) {
      cache_entry.Sharers.remove(in_msg.Sender);
    }
  }
659
  // "Null" writeback acknowledgements releasing a requester whose victim
  // needs no further handling. NOTE(review): the core variant sends
  // TDSysWBNack while the TCC variant sends TDSysWBAck -- presumably
  // intentional (cores retry/cancel, TCC just completes); confirm.
  action(nC_sendNullWBAckToCore, "nC", desc = "send a null WB Ack to release core") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(responseToCore_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysWBNack;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(nT_sendNullWBAckToTCC, "nT", desc = "send a null WB Ack to release TCC") {
    peek(w_TCCRequest_in, CPURequestMsg) {
      enqueue(w_respTCC_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysWBAck;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
683
  // Promote the single E-state sharer to owner (E tracks the exclusive
  // holder in Sharers; see the State:E asserts in setState).
  action(eto_moveExSharerToOwner, "eto", desc="move the current exclusive sharer to owner") {
    assert(cache_entry.Sharers.count() == 1);
    assert(cache_entry.Owner.count() == 0);
    cache_entry.Owner := cache_entry.Sharers;
    cache_entry.Sharers.clear();
    APPEND_TRANSITION_COMMENT(" new owner ");
    APPEND_TRANSITION_COMMENT(cache_entry.Owner);
  }

  action(aT_addTCCToSharers, "aT", desc="Add TCC to sharer list") {
    peek(w_TCCUnblock_in, UnblockMsg) {
      cache_entry.Sharers.add(in_msg.Sender);
    }
  }

  action(as_addToSharers, "as", desc="Add unblocker to sharer list") {
    peek(unblockNetwork_in, UnblockMsg) {
      cache_entry.Sharers.add(in_msg.Sender);
    }
  }

  // Demote the owner to an ordinary sharer (e.g. on a downgrade).
  action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
    cache_entry.Sharers.addNetDest(cache_entry.Owner);
    cache_entry.Owner.clear();
  }

  action(cc_clearSharers, "\c", desc="Clear the sharers field") {
    cache_entry.Sharers.clear();
  }

  // Ownership handoff: the machine that just unblocked becomes sole owner.
  action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
    peek(unblockNetwork_in, UnblockMsg) {
      cache_entry.Owner.clear();
      cache_entry.Owner.add(in_msg.Sender);
      APPEND_TRANSITION_COMMENT(" tcp_ub owner ");
      APPEND_TRANSITION_COMMENT(cache_entry.Owner);
    }
  }

  // Same handoff, but the unblock came from the TCC wire buffer.
  action(eT_ownerIsUnblocker, "eT", desc="TCC (unblocker) is now owner") {
    peek(w_TCCUnblock_in, UnblockMsg) {
      cache_entry.Owner.clear();
      cache_entry.Owner.add(in_msg.Sender);
      APPEND_TRANSITION_COMMENT(" tcc_ub owner ");
      APPEND_TRANSITION_COMMENT(cache_entry.Owner);
    }
  }
731
  // Fold probe-response data into the TBE. Dirty data is sticky: once the
  // TBE holds a dirty copy, later (clean) responses do not overwrite it.
  action(ctr_copyTCCResponseToTBE, "ctr", desc="Copy TCC probe response data to TBE") {
    peek(w_TCCResponse_in, ResponseMsg) {
      // Overwrite data if tbe does not hold dirty data. Stop once it is dirty.
      if(tbe.Dirty == false) {
        tbe.DataBlk := in_msg.DataBlk;
        tbe.Dirty := in_msg.Dirty;
        tbe.Sender := in_msg.Sender;
      }
      DPRINTF(RubySlicc, "%s\n", (tbe.DataBlk));
    }
  }

  action(ccr_copyCoreResponseToTBE, "ccr", desc="Copy core probe response data to TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      // Overwrite data if tbe does not hold dirty data. Stop once it is dirty.
      if(tbe.Dirty == false) {
        tbe.DataBlk := in_msg.DataBlk;
        tbe.Dirty := in_msg.Dirty;

        // Only record the sender if none has been recorded yet (Sender still
        // equals this controller's own machineID).
        if(tbe.Sender == machineID) {
          tbe.Sender := in_msg.Sender;
        }
      }
      DPRINTF(RubySlicc, "%s\n", (tbe.DataBlk));
    }
  }

  action(cd_clearDirtyBitTBE, "cd", desc="Clear Dirty bit in TBE") {
    tbe.Dirty := false;
  }
762
  // Issue read requests down to the NB directory on behalf of a core.
  // All three variants differ only in the request type.
  action(n_issueRdBlk, "n-", desc="Issue RdBlk") {
    enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:RdBlk;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
    }
  }

  action(nS_issueRdBlkS, "nS", desc="Issue RdBlkS") {
    enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:RdBlkS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
    }
  }

  action(nM_issueRdBlkM, "nM", desc="Issue RdBlkM") {
    enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:RdBlkM;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
    }
  }

  // Mark the in-flight transaction as an upgrade so the trigger queue fires
  // ProbeAcksCompleteReissue instead of ProbeAcksComplete.
  action(rU_rememberUpgrade, "rU", desc="Remember that this was an upgrade") {
    tbe.Upgrade := true;
  }
796
  // Record an owner that reported it could not transfer ownership during an
  // upgrade (one variant per response source: core network vs TCC wire).
  action(ruo_rememberUntransferredOwner, "ruo", desc="Remember the untransferred owner") {
    peek(responseNetwork_in, ResponseMsg) {
      if(in_msg.UntransferredOwner == true) {
        tbe.UntransferredOwner := in_msg.Sender;
        tbe.UntransferredOwnerExists := true;
      }
      DPRINTF(RubySlicc, "%s\n", (in_msg));
    }
  }

  action(ruoT_rememberUntransferredOwnerTCC, "ruoT", desc="Remember the untransferred owner") {
    peek(w_TCCResponse_in, ResponseMsg) {
      if(in_msg.UntransferredOwner == true) {
        tbe.UntransferredOwner := in_msg.Sender;
        tbe.UntransferredOwnerExists := true;
      }
      DPRINTF(RubySlicc, "%s\n", (in_msg));
    }
  }
816
  // Victimize the block down to the NB. Dirty (M/O) victims use VicDirty and
  // flag Shared when other copies remain (state O); clean (E/S) victims use
  // VicClean and flag Shared in state S.
  action(vd_victim, "vd", desc="Victimize M/O Data") {
    enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.Type := CoherenceRequestType:VicDirty;
      if (cache_entry.CacheState == State:O) {
        out_msg.Shared := true;
      } else {
        out_msg.Shared := false;
      }
      out_msg.Dirty := true;
    }
  }

  action(vc_victim, "vc", desc="Victimize E/S Data") {
    enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.Type := CoherenceRequestType:VicClean;
      if (cache_entry.CacheState == State:S) {
        out_msg.Shared := true;
      } else {
        out_msg.Shared := false;
      }
      out_msg.Dirty := false;
    }
  }
848
849
  // Forward the incoming core request (type, requestor, data, shared bit)
  // unchanged to the TCC bank that owns this address.
  action(sT_sendRequestToTCC, "sT", desc="send request to TCC") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      enqueue(w_requestTCC_out, CPURequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC,
                                TCC_select_low_bit, TCC_select_num_bits));
        out_msg.Shared := in_msg.Shared;
        out_msg.MessageSize := in_msg.MessageSize;
      }
      APPEND_TRANSITION_COMMENT(" requestor ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);

    }
  }
867
868
  // Downgrade-probe (with data return) all core sharers/owner, excluding
  // the local TCC bank (the TCC is probed separately by s2/ls2).
  // NOTE(review): 'temp' is not declared here — presumably a machine-scope
  // NetDest declared earlier in the file; confirm before reuse.
  action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);

    temp := cache_entry.Sharers;
    temp.addNetDest(cache_entry.Owner);
    if (temp.isElement(tcc)) {
      temp.remove(tcc);
    }
    if (temp.count() > 0) {
      enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbDowngrade;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination := temp;
        // Pending-ack count is set only when at least one probe is sent.
        tbe.NumPendingAcks := temp.count();
        if(cache_entry.CacheState == State:M) {
          // In M there can be exactly one holder.
          assert(tbe.NumPendingAcks == 1);
        }
        DPRINTF(RubySlicc, "%s\n", (out_msg));
      }
    }
  }
893
  // Downgrade-probe (with data return) the local TCC bank, if it is a
  // sharer or the owner; bumps the pending-ack count by one.
  // NOTE(review): ls2 and s2 below have identical bodies; they exist as
  // separate actions presumably for trace/transition naming only.
  action(ls2_probeShrL2Data, "ls2", desc="local probe downgrade L2, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
      enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbDowngrade;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.add(tcc);
        tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
        DPRINTF(RubySlicc, "%s\n", out_msg);

      }
    }
  }

  // Same as ls2: downgrade-probe the TCC bank with data return.
  action(s2_probeShrL2Data, "s2", desc="probe shared L2, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
      enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbDowngrade;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.add(tcc);
        tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
        DPRINTF(RubySlicc, "%s\n", out_msg);

      }
    }
  }
927
  // Invalidate-probe (with data return) all core sharers/owner, excluding
  // both the local TCC bank and the requestor itself (local request path).
  action(ldc_probeInvCoreData, "ldc", desc="local probe to inv cores, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    peek(coreRequestNetwork_in, CPURequestMsg) {
      NetDest dest:= cache_entry.Sharers;
      dest.addNetDest(cache_entry.Owner);
      if(dest.isElement(tcc)){
        dest.remove(tcc);
      }
      dest.remove(in_msg.Requestor);
      // Set unconditionally (may be 0), unlike sc which sets it inside
      // the enqueue.
      tbe.NumPendingAcks := dest.count();
      if (dest.count()>0){
        enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := ProbeRequestType:PrbInv;
          out_msg.ReturnData := true;
          out_msg.MessageSize := MessageSizeType:Control;

          out_msg.Destination.addNetDest(dest);
          if(cache_entry.CacheState == State:M) {
            // In M there can be exactly one holder.
            assert(tbe.NumPendingAcks == 1);
          }

          DPRINTF(RubySlicc, "%s\n", (out_msg));
        }
      }
    }
  }

  // Invalidate-probe (with data return) the local TCC bank, if present in
  // sharers/owner; bumps the pending-ack count by one.
  action(ld2_probeInvL2Data, "ld2", desc="local probe inv L2, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
      enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbInv;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.add(tcc);
        tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
        DPRINTF(RubySlicc, "%s\n", out_msg);

      }
    }
  }
973
  // Invalidate-probe (with data return) all sharers and the owner; the TCC
  // bank is removed from the destination afterwards (it is probed by d2),
  // decrementing the pending-ack count accordingly.
  action(dc_probeInvCoreData, "dc", desc="probe inv cores + TCC, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
      out_msg.addr := address;
      out_msg.Type := ProbeRequestType:PrbInv;
      out_msg.ReturnData := true;
      out_msg.MessageSize := MessageSizeType:Control;

      out_msg.Destination.addNetDest(cache_entry.Sharers);
      out_msg.Destination.addNetDest(cache_entry.Owner);
      tbe.NumPendingAcks := cache_entry.Sharers.count() + cache_entry.Owner.count();
      if(cache_entry.CacheState == State:M) {
        // In M there can be exactly one holder.
        assert(tbe.NumPendingAcks == 1);
      }
      if (out_msg.Destination.isElement(tcc)) {
        out_msg.Destination.remove(tcc);
        tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
      }

      DPRINTF(RubySlicc, "%s\n", (out_msg));
    }
  }

  // Invalidate-probe (with data return) the local TCC bank, if present in
  // sharers/owner; bumps the pending-ack count by one.
  action(d2_probeInvL2Data, "d2", desc="probe inv L2, return data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
      enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbInv;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.add(tcc);
        tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
        DPRINTF(RubySlicc, "%s\n", out_msg);

      }
    }
  }
1014
  // Invalidate-probe (no data) every TCP/SQC under this directory that is a
  // sharer or owner, excluding the requestor; used for local CtoD upgrades.
  // NOTE(review): 'TCC_dir_subtree' and 'temp' are not declared here —
  // presumably machine-scope NetDests declared earlier in the file.
  action(lpc_probeInvCore, "lpc", desc="local probe inv cores, no data") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      // Start from all possible local clients, then intersect with the
      // actual sharer/owner set.
      TCC_dir_subtree.broadcast(MachineType:TCP);
      TCC_dir_subtree.broadcast(MachineType:SQC);

      temp := cache_entry.Sharers;
      temp := temp.OR(cache_entry.Owner);
      TCC_dir_subtree := TCC_dir_subtree.AND(temp);
      tbe.NumPendingAcks := TCC_dir_subtree.count();
      if(cache_entry.CacheState == State:M) {
        // In M there can be exactly one holder.
        assert(tbe.NumPendingAcks == 1);
      }
      // The upgrading requestor does not probe itself.
      if(TCC_dir_subtree.isElement(in_msg.Requestor)) {
        TCC_dir_subtree.remove(in_msg.Requestor);
        tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
      }

      if(TCC_dir_subtree.count() > 0) {
        enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
          out_msg.addr := address;
          out_msg.Type := ProbeRequestType:PrbInv;
          out_msg.ReturnData := false;
          out_msg.MessageSize := MessageSizeType:Control;
          out_msg.localCtoD := true;

          out_msg.Destination.addNetDest(TCC_dir_subtree);

          DPRINTF(RubySlicc, "%s\n", (out_msg));
        }
      }
    }
  }

  // Invalidate-probe (no data) every sharing/owning TCP/SQC; unlike lpc,
  // no requestor is excluded and localCtoD is not set.
  action(ipc_probeInvCore, "ipc", desc="probe inv cores, no data") {
    TCC_dir_subtree.broadcast(MachineType:TCP);
    TCC_dir_subtree.broadcast(MachineType:SQC);

    temp := cache_entry.Sharers;
    temp := temp.OR(cache_entry.Owner);
    TCC_dir_subtree := TCC_dir_subtree.AND(temp);
    tbe.NumPendingAcks := TCC_dir_subtree.count();
    if(TCC_dir_subtree.count() > 0) {

      enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbInv;
        out_msg.ReturnData := false;
        out_msg.MessageSize := MessageSizeType:Control;

        out_msg.Destination.addNetDest(TCC_dir_subtree);
        if(cache_entry.CacheState == State:M) {
          // In M there can be exactly one holder.
          assert(tbe.NumPendingAcks == 1);
        }

        DPRINTF(RubySlicc, "%s\n", (out_msg));
      }
    }
  }
1073
  // Invalidate-probe (no data) the local TCC bank, if it is a sharer or
  // the owner; bumps the pending-ack count by one.
  action(i2_probeInvL2, "i2", desc="probe inv L2, no data") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    if ((cache_entry.Sharers.isElement(tcc)) || (cache_entry.Owner.isElement(tcc))) {
      enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
        tbe.NumPendingAcks := tbe.NumPendingAcks + 1;
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbInv;
        out_msg.ReturnData := false;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.add(tcc);
        DPRINTF(RubySlicc, "%s\n", out_msg);

      }
    }
  }
1090
  // Probe ack to the NB directory: invalidated, no data, Ntsl set.
  action(pi_sendProbeResponseInv, "pi", desc="send probe ack inv, no data") {
    enqueue(responseToNB_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp; // TCC, L3 respond in same way to probes
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := false;
      out_msg.Hit := false;
      out_msg.Ntsl := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  // Same payload as pi (fields set in a different order); kept as a
  // separate action for the M/S probe paths.
  action(pim_sendProbeResponseInvMs, "pim", desc="send probe ack inv, no data") {
    enqueue(responseToNB_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := false;
      out_msg.Ntsl := true;
      out_msg.Hit := false;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  // Probe ack for PrbShrData when the block misses here: no data, no hit,
  // Ntsl clear.
  action(prm_sendProbeResponseMiss, "prm", desc="send probe ack PrbShrData, no data") {
    enqueue(responseToNB_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and TCC respond in same way to probes
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := false; // only true if sending back data i think
      out_msg.Hit := false;
      out_msg.Ntsl := false;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }
1132
1133
1134
  // Probe ack with data to the NB directory; Dirty is copied from the TBE
  // when one exists (otherwise left at the message default).
  action(pd_sendProbeResponseData, "pd", desc="send probe ack, with data") {
    enqueue(responseToNB_out, ResponseMsg, issue_latency) {
      assert(is_valid(cache_entry) || is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUPrbResp;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := getDataBlock(address);
      if (is_valid(tbe)) {
        out_msg.Dirty := tbe.Dirty;
      }
      out_msg.Hit := true;
      out_msg.State := CoherenceState:NA;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }
1151
1152
1153 action(pdm_sendProbeResponseDataMs, "pdm", desc="send probe ack, with data") {
1154 enqueue(responseToNB_out, ResponseMsg, issue_latency) {
1155 assert(is_valid(cache_entry) || is_valid(tbe));
1156 assert(is_valid(cache_entry));
1157 out_msg.addr := address;
1158 out_msg.Type := CoherenceResponseType:CPUPrbResp;
1159 out_msg.Sender := machineID;
1160 out_msg.Destination.add(map_Address_to_Directory(address));
1161 out_msg.DataBlk := getDataBlock(address);
1162 if (is_valid(tbe)) {
1163 out_msg.Dirty := tbe.Dirty;
1164 }
1165 out_msg.Hit := true;
1166 out_msg.State := CoherenceState:NA;
1167 out_msg.MessageSize := MessageSizeType:Response_Data;
1168 }
1169 }
1170
  // Cancel an in-flight writeback at the NB directory (WrCancel).
  action(mc_cancelWB, "mc", desc="send writeback cancel to NB directory") {
    enqueue(requestToNB_out, CPURequestMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:WrCancel;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Requestor := machineID;
      out_msg.MessageSize := MessageSizeType:Request_Control;
    }
  }
1180
  // Send one shared response, built from the TBE, to every requestor merged
  // into cache_entry.MergedSharers (the read-merging window).
  action(sCS_sendCollectiveResponseS, "sCS", desc="send shared response to all merged TCP/SQC") {
    enqueue(responseToCore_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:TDSysResp;
      out_msg.Sender := tbe.Sender;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.CtoD := false;
      out_msg.State := CoherenceState:Shared;
      out_msg.Destination.addNetDest(cache_entry.MergedSharers);
      out_msg.Shared := tbe.Shared;
      out_msg.Dirty := tbe.Dirty;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  // Send a shared response, built from the TBE, to the original requestor.
  action(sS_sendResponseS, "sS", desc="send shared response to TCP/SQC") {
    enqueue(responseToCore_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:TDSysResp;
      out_msg.Sender := tbe.Sender;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.CtoD := false;
      out_msg.State := CoherenceState:Shared;
      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.Shared := tbe.Shared;
      out_msg.Dirty := tbe.Dirty;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }

  // Send a modified response, built from the TBE, to the original requestor.
  action(sM_sendResponseM, "sM", desc="send response to TCP/SQC") {
    enqueue(responseToCore_out, ResponseMsg, 1) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:TDSysResp;
      out_msg.Sender := tbe.Sender;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.MessageSize := MessageSizeType:Response_Data;
      out_msg.CtoD := false;
      out_msg.State := CoherenceState:Modified;
      out_msg.Destination.add(tbe.OriginalRequestor);
      out_msg.Shared := tbe.Shared;
      out_msg.Dirty := tbe.Dirty;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
1228
1229
1230
  // Forward a writeback ack from the NB to the TCC, unless this directory
  // itself was the requestor (self-initiated recall).
  action(fw2_forwardWBAck, "fw2", desc="forward WBAck to TCC") {
    peek(responseFromNB_in, ResponseMsg) {
      if(tbe.OriginalRequestor != machineID) {
        enqueue(w_respTCC_out, ResponseMsg, 1) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:TDSysWBAck;
          out_msg.Sender := machineID;
          //out_msg.DataBlk := tbe.DataBlk;
          out_msg.Destination.add(tbe.OriginalRequestor);
          out_msg.MessageSize := in_msg.MessageSize;
        }
      }
    }
  }
1245
1246 action(sa_saveSysAck, "sa", desc="Save SysAck ") {
1247 peek(responseFromNB_in, ResponseMsg) {
1248 tbe.Dirty := in_msg.Dirty;
1249 if (tbe.Dirty == false) {
1250 tbe.DataBlk := in_msg.DataBlk;
1251 }
1252 else {
1253 tbe.DataBlk := tbe.DataBlk;
1254 }
1255 tbe.CtoD := in_msg.CtoD;
1256 tbe.CohState := in_msg.State;
1257 tbe.Shared := in_msg.Shared;
1258 tbe.MessageSize := in_msg.MessageSize;
1259 }
1260 }
1261
1262 action(fsa_forwardSavedAck, "fsa", desc="forward saved SysAck to TCP or SQC") {
1263 enqueue(responseToCore_out, ResponseMsg, 1) {
1264 out_msg.addr := address;
1265 out_msg.Type := CoherenceResponseType:TDSysResp;
1266 out_msg.Sender := machineID;
1267 if (tbe.Dirty == false) {
1268 out_msg.DataBlk := tbe.DataBlk;
1269 }
1270 else {
1271 out_msg.DataBlk := tbe.DataBlk;
1272 }
1273 out_msg.CtoD := tbe.CtoD;
1274 out_msg.State := tbe.CohState;
1275 out_msg.Destination.add(tbe.OriginalRequestor);
1276 out_msg.Shared := tbe.Shared;
1277 out_msg.MessageSize := tbe.MessageSize;
1278 out_msg.Dirty := tbe.Dirty;
1279 out_msg.Sender := tbe.Sender;
1280 }
1281 }
1282
  // Forward a live SysAck from the NB to the original requestor. Clean acks
  // carry the NB's data (and reset tbe.Sender to this directory); dirty
  // acks carry the data already gathered in the TBE.
  action(fa_forwardSysAck, "fa", desc="forward SysAck to TCP or SQC") {
    peek(responseFromNB_in, ResponseMsg) {
      enqueue(responseToCore_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:TDSysResp;
        out_msg.Sender := machineID;
        if (tbe.Dirty == false) {
          out_msg.DataBlk := in_msg.DataBlk;
          tbe.Sender := machineID;
        }
        else {
          out_msg.DataBlk := tbe.DataBlk;
        }
        out_msg.CtoD := in_msg.CtoD;
        out_msg.State := in_msg.State;
        out_msg.Destination.add(tbe.OriginalRequestor);
        out_msg.Shared := in_msg.Shared;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Dirty := in_msg.Dirty;
        // Overwrites the machineID assigned above; only tbe.Sender is sent.
        out_msg.Sender := tbe.Sender;
        DPRINTF(RubySlicc, "%s\n", (out_msg.DataBlk));
      }
    }
  }
1307
  // Downgrade-probe (with data return) only the current owner: via the TCC
  // wire buffer if the owner is the TCC bank, otherwise via the core probe
  // network. Exactly one ack is expected either way.
  action(pso_probeSharedDataOwner, "pso", desc="probe shared data at owner") {
    MachineID tcc := mapAddressToRange(address,MachineType:TCC,
                                       TCC_select_low_bit, TCC_select_num_bits);
    if (cache_entry.Owner.isElement(tcc)) {
      enqueue(w_probeTCC_out, TDProbeRequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbDowngrade;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.add(tcc);
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
    else { // i.e., owner is a core
      enqueue(probeToCore_out, TDProbeRequestMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Type := ProbeRequestType:PrbDowngrade;
        out_msg.ReturnData := true;
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Destination.addNetDest(cache_entry.Owner);
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
    tbe.NumPendingAcks := 1;
  }
1333
  // --- Queue-pop actions: each dequeues exactly one message from one
  // --- incoming port at the current clock edge.

  action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
    coreRequestNetwork_in.dequeue(clockEdge());
  }

  action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
    unblockNetwork_in.dequeue(clockEdge());
  }

  action(pk_popResponseQueue, "pk", desc="Pop response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(pp_popProbeQueue, "pp", desc="Pop incoming probe queue") {
    probeNetwork_in.dequeue(clockEdge());
  }

  action(pR_popResponseFromNBQueue, "pR", desc="Pop incoming Response queue From NB") {
    responseFromNB_in.dequeue(clockEdge());
  }

  action(pt_popTriggerQueue, "pt", desc="pop trigger queue") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(pl_popTCCRequestQueue, "pl", desc="pop TCC request queue") {
    w_TCCRequest_in.dequeue(clockEdge());
  }

  action(plr_popTCCResponseQueue, "plr", desc="pop TCC response queue") {
    w_TCCResponse_in.dequeue(clockEdge());
  }

  action(plu_popTCCUnblockQueue, "plu", desc="pop TCC unblock queue") {
    w_TCCUnblock_in.dequeue(clockEdge());
  }
1369
1370
  // Move an unblocking core from the merged-request set into the sharer
  // list and account for one fewer outstanding unblock.
  action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
    peek(unblockNetwork_in, UnblockMsg) {
      cache_entry.Sharers.add(in_msg.Sender);
      cache_entry.MergedSharers.remove(in_msg.Sender);
      assert(cache_entry.WaitingUnblocks >= 0);
      cache_entry.WaitingUnblocks := cache_entry.WaitingUnblocks - 1;
    }
  }

  // Merge a new read requestor into the outstanding collective request and
  // count one more expected unblock.
  action(q_addOutstandingMergedSharer, "q", desc="Increment outstanding requests") {
    peek(coreRequestNetwork_in, CPURequestMsg) {
      cache_entry.MergedSharers.add(in_msg.Requestor);
      cache_entry.WaitingUnblocks := cache_entry.WaitingUnblocks + 1;
    }
  }

  // Notify the NB directory that this directory's state change is complete.
  action(uu_sendUnblock, "uu", desc="state changed, unblock") {
    enqueue(unblockToNB_out, UnblockMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
1395
  // --- Queue-recycle actions: put the head message back for a later
  // --- wakeup instead of consuming it (used to defer conflicting events).

  action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
    coreRequestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(yy_recycleTCCRequestQueue, "yy", desc="recycle yy request queue") {
    w_TCCRequest_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(xz_recycleResponseQueue, "xz", desc="recycle response queue") {
    responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(xx_recycleTCCResponseQueue, "xx", desc="recycle TCC response queue") {
    w_TCCResponse_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(vv_recycleTCCUnblockQueue, "vv", desc="Recycle the probe request queue") {
    w_TCCUnblock_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  // NOTE(review): despite its name, xy recycles w_TCCUnblock_in — the same
  // port as vv above — not the core unblockNetwork_in. The desc strings on
  // vv/xy also say "probe request queue". Presumably copy-paste; confirm
  // against the transitions that invoke xy before changing the port.
  action(xy_recycleUnblockQueue, "xy", desc="Recycle the probe request queue") {
    w_TCCUnblock_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(ww_recycleProbeRequest, "ww", desc="Recycle the probe request queue") {
    probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }
1423
  // Consume one expected probe ack.
  action(x_decrementAcks, "x", desc="decrement Acks pending") {
    tbe.NumPendingAcks := tbe.NumPendingAcks - 1;
  }

  // If all expected probe acks have arrived, fire an internal
  // AcksComplete trigger to advance the transaction.
  action(o_checkForAckCompletion, "o", desc="check for ack completion") {
    if (tbe.NumPendingAcks == 0) {
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := TriggerType:AcksComplete;
      }
    }
    APPEND_TRANSITION_COMMENT(" tbe acks ");
    APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks);
  }
1438
  // --- TBE allocation variants, one per trigger source. All initialize the
  // --- ack-tracking fields; they differ in where the requestor/data come
  // --- from.

  // TBE for a downward NB probe (no requestor/data to record).
  action(tp_allocateTBE, "tp", desc="allocate TBE Entry for upward transactions") {
    check_allocate(TBEs);
    peek(probeNetwork_in, NBProbeRequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs.lookup(address));
      tbe.Dirty := false;
      tbe.NumPendingAcks := 0;
      tbe.UntransferredOwnerExists := false;
    }
  }

  // TBE for a TCC-originated request; data/requestor taken from the
  // TCC request message.
  action(tv_allocateTBE, "tv", desc="allocate TBE Entry for TCC transactions") {
    check_allocate(TBEs);
    peek(w_TCCRequest_in, CPURequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs.lookup(address));
      tbe.DataBlk := in_msg.DataBlk; // Data only for WBs
      tbe.Dirty := false;
      tbe.OriginalRequestor := in_msg.Requestor;
      tbe.NumPendingAcks := 0;
      tbe.UntransferredOwnerExists := false;
    }
  }

  // TBE for a core request; data seeded from the directory entry and the
  // default sender is this directory.
  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    check_allocate(TBEs);//check whether resources are full
    peek(coreRequestNetwork_in, CPURequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs.lookup(address));
      tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
      tbe.Dirty := false;
      tbe.Upgrade := false;
      tbe.OriginalRequestor := in_msg.Requestor;
      tbe.NumPendingAcks := 0;
      tbe.UntransferredOwnerExists := false;
      tbe.Sender := machineID;
    }
  }

  // TBE for a self-initiated recall (no incoming message to peek).
  action(tr_allocateTBE, "tr", desc="allocate TBE Entry for recall") {
    check_allocate(TBEs);//check whether resources are full
    TBEs.allocate(address);
    set_tbe(TBEs.lookup(address));
    tbe.DataBlk := cache_entry.DataBlk; // Data only for WBs
    tbe.Dirty := false;
    tbe.Upgrade := false;
    tbe.OriginalRequestor := machineID; //Recall request, Self initiated
    tbe.NumPendingAcks := 0;
    tbe.UntransferredOwnerExists := false;
  }

  // Release the TBE for this address.
  action(dt_deallocateTBE, "dt", desc="Deallocate TBE entry") {
    TBEs.deallocate(address);
    unset_tbe();
  }
1494
1495
  // Allocate a directory-cache entry for this address (idempotent: no-op
  // if one already exists).
  action(d_allocateDir, "d", desc="allocate Directory Cache") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(directory.allocate(address, new Entry));
    }
  }

  // Free the directory-cache entry, if any, and clear the handle.
  action(dd_deallocateDir, "dd", desc="deallocate Directory Cache") {
    if (is_valid(cache_entry)) {
      directory.deallocate(address);
    }
    unset_cache_entry();
  }
1508
  // Tell the NB directory that the pending writeback data is stale and
  // nothing will be written back.
  action(ss_sendStaleNotification, "ss", desc="stale data; nothing to writeback") {
    enqueue(responseToNB_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:StaleNotif;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Sender := machineID;
      out_msg.MessageSize := MessageSizeType:Response_Control;
    }
  }

  // Write the TBE's data block back to the NB directory as CPUData.
  action(wb_data, "wb", desc="write back data") {
    enqueue(responseToNB_out, ResponseMsg, issue_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:CPUData;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      if (tbe.Shared) {
        out_msg.NbReqShared := true;
      } else {
        out_msg.NbReqShared := false;
      }
      out_msg.State := CoherenceState:Shared; // faux info
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
      DPRINTF(RubySlicc, "%s\n", out_msg);
    }
  }
1537
  // Record that a shared probe hit this transaction; the block must now be
  // treated as shared regardless of its earlier status.
  action(sf_setSharedFlip, "sf", desc="hit by shared probe, status may be different") {
    assert(is_valid(tbe));
    tbe.Shared := true;
  }

  // Capture probe-response data into the TBE. Dirty data always wins over
  // clean; a Hit response marks the block as cached somewhere.
  action(y_writeDataToTBE, "y", desc="write Probe Data to TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      if (!tbe.Dirty || in_msg.Dirty) {
        tbe.DataBlk := in_msg.DataBlk;
        tbe.Dirty := in_msg.Dirty;
      }
      if (in_msg.Hit) {
        tbe.Cached := true;
      }
    }
  }

  // Same as y, but for a probe response arriving from the TCC wire buffer.
  action(ty_writeTCCDataToTBE, "ty", desc="write TCC Probe Data to TBE") {
    peek(w_TCCResponse_in, ResponseMsg) {
      if (!tbe.Dirty || in_msg.Dirty) {
        tbe.DataBlk := in_msg.DataBlk;
        tbe.Dirty := in_msg.Dirty;
      }
      if (in_msg.Hit) {
        tbe.Cached := true;
      }
    }
  }
1566
1567
  // Touch the directory entry's replacement state (mark MRU).
  action(ut_updateTag, "ut", desc="update Tag (i.e. set MRU)") {
    directory.setMRU(address);
  }
1571
1572 // TRANSITIONS
1573
1574 // Handling TCP/SQC requests (similar to how NB dir handles TCC events with some changes to account for stateful directory).
1575
1576
1577 // transitions from base
  // Misses in I: allocate a directory entry + TBE, then forward the read to
  // the NB directory. The final state depends on the NB's ack type.
  transition(I, RdBlk, I_ES){TagArrayRead} {
    d_allocateDir;
    t_allocateTBE;
    n_issueRdBlk;
    i_popIncomingRequestQueue;
  }

  transition(I, RdBlkS, I_S){TagArrayRead} {
    d_allocateDir;
    t_allocateTBE;
    nS_issueRdBlkS;
    i_popIncomingRequestQueue;
  }


  // NB acks forward the SysAck to the requestor and wait in BBB_* for the
  // requestor's CoreUnblock before settling into the stable state.
  transition(I_S, NB_AckS, BBB_S) {
    fa_forwardSysAck;
    pR_popResponseFromNBQueue;
  }

  transition(I_ES, NB_AckS, BBB_S) {
    fa_forwardSysAck;
    pR_popResponseFromNBQueue;
  }

  transition(I_ES, NB_AckE, BBB_E) {
    fa_forwardSysAck;
    pR_popResponseFromNBQueue;
  }

  transition({S_M, O_M}, {NB_AckCtoD,NB_AckM}, BBB_M) {
    fa_forwardSysAck;
    pR_popResponseFromNBQueue;
  }

  transition(I_M, NB_AckM, BBB_M) {
    fa_forwardSysAck;
    pR_popResponseFromNBQueue;
  }

  // Unblocker becomes sole owner in M; prior owner/sharer bookkeeping is
  // cleared.
  transition(BBB_M, CoreUnblock, M){TagArrayWrite} {
    c_clearOwner;
    cc_clearSharers;
    e_ownerIsUnblocker;
    uu_sendUnblock;
    dt_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  transition(BBB_S, CoreUnblock, S){TagArrayWrite} {
    as_addToSharers;
    uu_sendUnblock;
    dt_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  transition(BBB_E, CoreUnblock, E){TagArrayWrite} {
    as_addToSharers;
    uu_sendUnblock;
    dt_deallocateTBE;
    j_popIncomingUnblockQueue;
  }


  transition(I, RdBlkM, I_M){TagArrayRead} {
    d_allocateDir;
    t_allocateTBE;
    nM_issueRdBlkM;
    i_popIncomingRequestQueue;
  }
1648
1649 //
  // Reads hitting in S: downgrade-probe cores and the TCC bank for data,
  // and open a merging window (BBS_S) that absorbs further reads.
  transition(S, {RdBlk, RdBlkS}, BBS_S){TagArrayRead} {
    t_allocateTBE;
    sc_probeShrCoreData;
    s2_probeShrL2Data;
    q_addOutstandingMergedSharer;
    i_popIncomingRequestQueue;
  }
  // Merging of read sharing into a single request
  transition(BBS_S, {RdBlk, RdBlkS}) {
    q_addOutstandingMergedSharer;
    i_popIncomingRequestQueue;
  }
  // Wait for probe acks to be complete
  transition(BBS_S, CPUPrbResp) {
    ccr_copyCoreResponseToTBE;
    x_decrementAcks;
    o_checkForAckCompletion;
    pk_popResponseQueue;
  }

  transition(BBS_S, TCCPrbResp) {
    ctr_copyTCCResponseToTBE;
    x_decrementAcks;
    o_checkForAckCompletion;
    plr_popTCCResponseQueue;
  }

  // Window for merging complete with this transition
  // Send responses to all outstanding
  transition(BBS_S, ProbeAcksComplete, BB_S) {
    sCS_sendCollectiveResponseS;
    pt_popTriggerQueue;
  }

  // Collect unblocks from every merged requestor; the last one returns to S.
  transition(BB_S, CoreUnblock, BB_S) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(BB_S, LastCoreUnblock, S) {
    m_addUnlockerToSharers;
    dt_deallocateTBE;
    j_popIncomingUnblockQueue;
  }

  // Reads hitting in O: only the owner is probed for data; same merging
  // window mechanics as the S case above.
  transition(O, {RdBlk, RdBlkS}, BBO_O){TagArrayRead} {
    t_allocateTBE;
    pso_probeSharedDataOwner;
    q_addOutstandingMergedSharer;
    i_popIncomingRequestQueue;
  }
  // Merging of read sharing into a single request
  transition(BBO_O, {RdBlk, RdBlkS}) {
    q_addOutstandingMergedSharer;
    i_popIncomingRequestQueue;
  }

  // Wait for probe acks to be complete
  transition(BBO_O, CPUPrbResp) {
    ccr_copyCoreResponseToTBE;
    x_decrementAcks;
    o_checkForAckCompletion;
    pk_popResponseQueue;
  }

  transition(BBO_O, TCCPrbResp) {
    ctr_copyTCCResponseToTBE;
    x_decrementAcks;
    o_checkForAckCompletion;
    plr_popTCCResponseQueue;
  }

  // Window for merging complete with this transition
  // Send responses to all outstanding
  transition(BBO_O, ProbeAcksComplete, BB_OO) {
    sCS_sendCollectiveResponseS;
    pt_popTriggerQueue;
  }

  transition(BB_OO, CoreUnblock) {
    m_addUnlockerToSharers;
    j_popIncomingUnblockQueue;
  }

  transition(BB_OO, LastCoreUnblock, O){TagArrayWrite} {
    m_addUnlockerToSharers;
    dt_deallocateTBE;
    j_popIncomingUnblockQueue;
  }
1739
  // Core writebacks: drop the core from sharer/owner bookkeeping, forward
  // the writeback to the TCC, and wait in BW_* for the TCC's unblock.
  transition(S, CPUWrite, BW_S){TagArrayRead} {
    t_allocateTBE;
    rC_removeCoreFromSharers;
    sT_sendRequestToTCC;
    i_popIncomingRequestQueue;
  }

  transition(E, CPUWrite, BW_E){TagArrayRead} {
    t_allocateTBE;
    rC_removeCoreFromSharers;
    sT_sendRequestToTCC;
    i_popIncomingRequestQueue;
  }

  transition(O, CPUWrite, BW_O){TagArrayRead} {
    t_allocateTBE;
    rCo_removeCoreFromOwner;
    rC_removeCoreFromSharers;
    sT_sendRequestToTCC;
    i_popIncomingRequestQueue;
  }

  transition(M, CPUWrite, BW_M){TagArrayRead} {
    t_allocateTBE;
    rCo_removeCoreFromOwner;
    rC_removeCoreFromSharers;
    sT_sendRequestToTCC;
    i_popIncomingRequestQueue;
  }

  // TCC unblocks: TCCUnblock_Sharer/TCCUnblock add the TCC to the
  // sharer/owner sets; TCCUnblock_NotValid means the TCC dropped the block
  // (stale writeback) and only the TBE is cleaned up.
  transition(BW_S, TCCUnblock_Sharer, S){TagArrayWrite} {
    aT_addTCCToSharers;
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_S, TCCUnblock_NotValid, S){TagArrayWrite} {
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_E, TCCUnblock, E){TagArrayWrite} {
    cc_clearSharers;
    aT_addTCCToSharers;
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_E, TCCUnblock_NotValid, E) {
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_M, TCCUnblock, M) {
    c_clearOwner;
    cc_clearSharers;
    eT_ownerIsUnblocker;
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_M, TCCUnblock_NotValid, M) {
    // Note this transition should only be executed if we received a stale wb
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_O, TCCUnblock, O) {
    c_clearOwner;
    eT_ownerIsUnblocker;
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  transition(BW_O, TCCUnblock_NotValid, O) {
    // Note this transition should only be executed if we received a stale wb
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  // We lost the owner likely do to an invalidation racing with a 'O' wb
  transition(BW_O, TCCUnblock_Sharer, S) {
    c_clearOwner;
    aT_addTCCToSharers;
    dt_deallocateTBE;
    plu_popTCCUnblockQueue;
  }

  // Probes arriving mid-writeback are deferred until the BW_* window closes.
  transition({BW_M, BW_S, BW_E, BW_O}, {PrbInv,PrbInvData,PrbShrData}) {
    ww_recycleProbeRequest;
  }

  transition(BRWD_I, {PrbInvData, PrbInv, PrbShrData}) {
    ww_recycleProbeRequest;
  }
1835
1836 // Three step process: locally invalidate others, issue CtoD, wait for NB_AckCtoD
// Step 1: probe-invalidate the other local cores and the L2, then wait
// in BBS_UM for all acks.
1837 transition(S, CtoD, BBS_UM) {TagArrayRead} {
1838 t_allocateTBE;
1839 lpc_probeInvCore;
1840 i2_probeInvL2;
1841 o_checkForAckCompletion;
1842 i_popIncomingRequestQueue;
1843 }
1844
// Step 2: count down acks from cores and the TCC.
1845 transition(BBS_UM, CPUPrbResp, BBS_UM) {
1846 x_decrementAcks;
1847 o_checkForAckCompletion;
1848 pk_popResponseQueue;
1849 }
1850
1851 transition(BBS_UM, TCCPrbResp) {
1852 x_decrementAcks;
1853 o_checkForAckCompletion;
1854 plr_popTCCResponseQueue;
1855 }
1856
// Step 3: all acks in — issue RdBlkM to the NB and record the upgrade.
1857 transition(BBS_UM, ProbeAcksComplete, S_M) {
1858 rU_rememberUpgrade;
1859 nM_issueRdBlkM;
1860 pt_popTriggerQueue;
1861 }
1862
1863 // Three step process: locally invalidate others, issue CtoD, wait for NB_AckCtoD
// Same sequence starting from O; the untransferred owner is remembered
// while responses drain.
1864 transition(O, CtoD, BBO_UM){TagArrayRead} {
1865 t_allocateTBE;
1866 lpc_probeInvCore;
1867 i2_probeInvL2;
1868 o_checkForAckCompletion;
1869 i_popIncomingRequestQueue;
1870 }
1871
1872 transition(BBO_UM, CPUPrbResp, BBO_UM) {
1873 ruo_rememberUntransferredOwner;
1874 x_decrementAcks;
1875 o_checkForAckCompletion;
1876 pk_popResponseQueue;
1877 }
1878
1879 transition(BBO_UM, TCCPrbResp) {
1880 ruoT_rememberUntransferredOwnerTCC;
1881 x_decrementAcks;
1882 o_checkForAckCompletion;
1883 plr_popTCCResponseQueue;
1884 }
1885
1886 transition(BBO_UM, ProbeAcksComplete, O_M) {
1887 rU_rememberUpgrade;
1888 nM_issueRdBlkM;
1889 pt_popTriggerQueue;
1890 }
1891
// Local RdBlkM while other local caches hold the block: probe-invalidate
// them (collecting data), gather acks in BBS_M/BBO_M, then issue RdBlkM
// to the NB.
1892 transition({S,E}, RdBlkM, BBS_M){TagArrayWrite} {
1893 t_allocateTBE;
1894 ldc_probeInvCoreData;
1895 ld2_probeInvL2Data;
1896 o_checkForAckCompletion;
1897 i_popIncomingRequestQueue;
1898 }
1899
1900 transition(BBS_M, CPUPrbResp) {
1901 ccr_copyCoreResponseToTBE;
1902 rR_removeResponderFromSharers;
1903 x_decrementAcks;
1904 o_checkForAckCompletion;
1905 pk_popResponseQueue;
1906 }
1907
1908 transition(BBS_M, TCCPrbResp) {
1909 ctr_copyTCCResponseToTBE;
1910 x_decrementAcks;
1911 o_checkForAckCompletion;
1912 plr_popTCCResponseQueue;
1913 }
1914
1915 transition(BBS_M, ProbeAcksComplete, S_M) {
1916 nM_issueRdBlkM;
1917 pt_popTriggerQueue;
1918 }
1919
// Same sequence starting from O.
1920 transition(O, RdBlkM, BBO_M){TagArrayRead} {
1921 t_allocateTBE;
1922 ldc_probeInvCoreData;
1923 ld2_probeInvL2Data;
1924 o_checkForAckCompletion;
1925 i_popIncomingRequestQueue;
1926 }
1927
1928 transition(BBO_M, CPUPrbResp) {
1929 ccr_copyCoreResponseToTBE;
1930 rR_removeResponderFromSharers;
1931 x_decrementAcks;
1932 o_checkForAckCompletion;
1933 pk_popResponseQueue;
1934 }
1935
1936 transition(BBO_M, TCCPrbResp) {
1937 ctr_copyTCCResponseToTBE;
1938 x_decrementAcks;
1939 o_checkForAckCompletion;
1940 plr_popTCCResponseQueue;
1941 }
1942
1943 transition(BBO_M, ProbeAcksComplete, O_M) {
1944 nM_issueRdBlkM;
1945 pt_popTriggerQueue;
1946 }
1947
1948 //
// Local RdBlkM while in M: pull the dirty data back from the current
// owner via invalidating probes, then forward it directly to the
// requestor (sM_sendResponseM) — no NB request needed since the
// directory already has M permission.
1949 transition(M, RdBlkM, BBM_M){TagArrayRead} {
1950 t_allocateTBE;
1951 ldc_probeInvCoreData;
1952 ld2_probeInvL2Data;
1953 i_popIncomingRequestQueue;
1954 }
1955
1956 transition(BBM_M, CPUPrbResp) {
1957 ccr_copyCoreResponseToTBE;
1958 x_decrementAcks;
1959 o_checkForAckCompletion;
1960 pk_popResponseQueue;
1961 }
1962
1963 // TCP recalled block before receiving probe
1964 transition({BBM_M, BBS_M, BBO_M}, {CPUWrite,NoCPUWrite}) {
1965 zz_recycleRequest;
1966 }
1967
1968 transition(BBM_M, TCCPrbResp) {
1969 ctr_copyTCCResponseToTBE;
1970 x_decrementAcks;
1971 o_checkForAckCompletion;
1972 plr_popTCCResponseQueue;
1973 }
1974
1975 transition(BBM_M, ProbeAcksComplete, BB_M) {
1976 sM_sendResponseM;
1977 pt_popTriggerQueue;
1978 }
1979
// Requestor's unblock makes it the new owner.
1980 transition(BB_M, CoreUnblock, M){TagArrayWrite} {
1981 e_ownerIsUnblocker;
1982 dt_deallocateTBE;
1983 j_popIncomingUnblockQueue;
1984 }
1985
// Local read (RdBlkS/RdBlk) while in M or E: probe the owner for shared
// data, respond S to the requestor, and settle in O once it unblocks.
1986 transition(M, {RdBlkS, RdBlk}, BBM_O){TagArrayRead} {
1987 t_allocateTBE;
1988 sc_probeShrCoreData;
1989 s2_probeShrL2Data;
1990 i_popIncomingRequestQueue;
1991 }
1992
// From E the exclusive sharer first becomes the recorded owner.
1993 transition(E, {RdBlkS, RdBlk}, BBM_O){TagArrayRead} {
1994 t_allocateTBE;
1995 eto_moveExSharerToOwner;
1996 sc_probeShrCoreData;
1997 s2_probeShrL2Data;
1998 i_popIncomingRequestQueue;
1999 }
2000
2001 transition(BBM_O, CPUPrbResp) {
2002 ccr_copyCoreResponseToTBE;
2003 x_decrementAcks;
2004 o_checkForAckCompletion;
2005 pk_popResponseQueue;
2006 }
2007 transition(BBM_O, TCCPrbResp) {
2008 ctr_copyTCCResponseToTBE;
2009 x_decrementAcks;
2010 o_checkForAckCompletion;
2011 plr_popTCCResponseQueue;
2012 }
2013 transition(BBM_O, ProbeAcksComplete, BB_O) {
2014 sS_sendResponseS;
2015 pt_popTriggerQueue;
2016 }
2017
2018 transition(BB_O, CoreUnblock, O){TagArrayWrite} {
2019 as_addToSharers;
2020 dt_deallocateTBE;
2021 j_popIncomingUnblockQueue;
2022 }
2023
// Probes and replacement requests arriving while a local transaction is
// mid-flight are recycled until the block stabilizes.
2024 transition({BBO_O, BBM_M, BBS_S, BBM_O, BB_M, BB_O, BB_S, BBO_UM, BBS_UM, BBS_M, BBO_M, BB_OO}, {PrbInvData, PrbInv,PrbShrData}) {
2025 ww_recycleProbeRequest;
2026 }
2027
2028 transition({BBM_O, BBS_S, CP_S, CP_O, CP_SM, CP_OM, BBO_O}, {CPUWrite,NoCPUWrite}) {
2029 zz_recycleRequest;
2030 }
2031
2032 // stale CtoD raced with external invalidation
2033 transition({I, CP_I, B_I, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, CtoD) {
2034 i_popIncomingRequestQueue;
2035 }
2036
2037 // stale CtoD raced with internal RdBlkM
2038 transition({BBM_M, BBS_M, BBO_M, BBB_M, BBS_UM, BBO_UM}, CtoD) {
2039 i_popIncomingRequestQueue;
2040 }
2041
// CtoD is meaningless when the directory already holds E/M; drop it.
2042 transition({E, M}, CtoD) {
2043 i_popIncomingRequestQueue;
2044 }
2045
2046
2047 // TCC-directory has sent out (And potentially received acks for) probes.
2048 // TCP/SQC replacements (known subsequently to be stale) are popped off.
2049 transition({BBO_UM, BBS_UM}, {CPUWrite,NoCPUWrite}) {
2050 nC_sendNullWBAckToCore;
2051 i_popIncomingRequestQueue;
2052 }
2053
2054 transition(S_M, {NoCPUWrite, CPUWrite}) {
2055 zz_recycleRequest;
2056 }
2057
2058 transition(O_M, {NoCPUWrite, CPUWrite}) {
2059 zz_recycleRequest;
2060 }
2061
2062
// TCC victims arriving in invalidating transients are stale — ack with a
// null writeback and drop them.
2063 transition({BBM_M, BBS_M, BBO_M, BBO_UM, BBS_UM}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
2064 nT_sendNullWBAckToTCC;
2065 pl_popTCCRequestQueue;
2066 }
2067
2068 transition({CP_S, CP_O, CP_OM, CP_SM}, {VicDirty, VicClean, VicDirtyLast, CancelWB, NoVic}) {
2069 yy_recycleTCCRequestQueue;
2070 }
2071
2072 // However, when TCCdir has sent out PrbSharedData, one cannot ignore.
2073 transition({BBS_S, BBO_O, BBM_O, S_M, O_M, BBB_M, BBB_S, BBB_E}, {VicDirty, VicClean, VicDirtyLast,CancelWB}) {
2074 yy_recycleTCCRequestQueue;
2075 }
2076
2077 transition({BW_S,BW_E,BW_O, BW_M}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
2078 yy_recycleTCCRequestQueue;
2079 }
2080
// CancelWB during BW_* terminates the pending TCC writeback with a null ack.
2081 transition({BW_S,BW_E,BW_O, BW_M}, CancelWB) {
2082 nT_sendNullWBAckToTCC;
2083 pl_popTCCRequestQueue;
2084 }
2085
2086
2087 /// recycle if waiting for unblocks.
2088 transition({BB_M,BB_O,BB_S,BB_OO}, {VicDirty, VicClean, VicDirtyLast,NoVic,CancelWB}) {
2089 yy_recycleTCCRequestQueue;
2090 }
2091
2092 transition({BBS_S, BBO_O}, NoVic) {
2093 rT_removeTCCFromSharers;
2094 nT_sendNullWBAckToTCC;
2095 pl_popTCCRequestQueue;
2096 }
2097
2098 // stale. Pop message and send dummy ack.
2099 transition({I_S, I_ES, I_M}, {VicDirty, VicClean, VicDirtyLast, NoVic}) {
2100 nT_sendNullWBAckToTCC;
2101 pl_popTCCRequestQueue;
2102 }
2103
// Real TCC victims from stable states: allocate a victim TBE, forward the
// victim to the NB, and wait in V*_I / VO_S for the NB's writeback ack.
2104 transition(M, VicDirtyLast, VM_I){TagArrayRead} {
2105 tv_allocateTBE;
2106 vd_victim;
2107 pl_popTCCRequestQueue;
2108 }
2109
2110 transition(E, VicDirty, VM_I){TagArrayRead} {
2111 tv_allocateTBE;
2112 vd_victim;
2113 pl_popTCCRequestQueue;
2114 }
2115
// O with other sharers remaining downgrades to S rather than I.
2116 transition(O, VicDirty, VO_S){TagArrayRead} {
2117 tv_allocateTBE;
2118 vd_victim;
2119 pl_popTCCRequestQueue;
2120 }
2121
2122 transition(O, {VicDirtyLast, VicClean}, VO_I){TagArrayRead} {
2123 tv_allocateTBE;
2124 vd_victim;
2125 pl_popTCCRequestQueue;
2126 }
2127
2128 transition({E, S}, VicClean, VES_I){TagArrayRead} {
2129 tv_allocateTBE;
2130 vc_victim;
2131 pl_popTCCRequestQueue;
2132 }
2133
// NoVic / NoCPUWrite: the cache is dropping the block without data; just
// remove it from the records and send a null ack.
2134 transition({O, S}, NoVic){TagArrayRead} {
2135 rT_removeTCCFromSharers;
2136 nT_sendNullWBAckToTCC;
2137 pl_popTCCRequestQueue;
2138 }
2139
2140 transition({O,S}, NoCPUWrite){TagArrayRead} {
2141 rC_removeCoreFromSharers;
2142 nC_sendNullWBAckToCore;
2143 i_popIncomingRequestQueue;
2144 }
2145
2146 transition({M,E}, NoCPUWrite){TagArrayRead} {
2147 rC_removeCoreFromSharers;
2148 nC_sendNullWBAckToCore;
2149 i_popIncomingRequestQueue;
2150 }
2151
2152 // This can only happen if it is a race. (TCCdir sent out probes which caused this cancel in the first place.)
2153 transition({VM_I, VES_I, VO_I}, CancelWB) {
2154 pl_popTCCRequestQueue;
2155 }
2156
// NB acknowledged the victim writeback: push the data down, forward the
// ack to the evicting cache, and release the TBE (and directory entry
// when the block leaves entirely).
2157 transition({VM_I, VES_I, VO_I}, NB_AckWB, I){TagArrayWrite} {
2158 c_clearOwner;
2159 cc_clearSharers;
2160 wb_data;
2161 fw2_forwardWBAck;
2162 dt_deallocateTBE;
2163 dd_deallocateDir;
2164 pR_popResponseFromNBQueue;
2165 }
2166
// Other sharers remain, so the directory entry is kept and state goes to S.
2167 transition(VO_S, NB_AckWB, S){TagArrayWrite} {
2168 c_clearOwner;
2169 wb_data;
2170 fw2_forwardWBAck;
2171 dt_deallocateTBE;
2172 pR_popResponseFromNBQueue;
2173 }
2174
// I_C / I_W: the writeback was made stale by an intervening probe;
// notify the writer of staleness instead of pushing data.
2175 transition(I_C, NB_AckWB, I){TagArrayWrite} {
2176 c_clearOwner;
2177 cc_clearSharers;
2178 ss_sendStaleNotification;
2179 fw2_forwardWBAck;
2180 dt_deallocateTBE;
2181 dd_deallocateDir;
2182 pR_popResponseFromNBQueue;
2183 }
2184
2185 transition(I_W, NB_AckWB, I) {
2186 ss_sendStaleNotification;
2187 dt_deallocateTBE;
2188 dd_deallocateDir;
2189 pR_popResponseFromNBQueue;
2190 }
2191
2192
2193
2194 // Do not handle replacements, reads of any kind or writebacks from transients; recycle
2195 transition({I_M, I_ES, I_S, MO_I, ES_I, S_M, O_M, VES_I, VO_I, VO_S, VM_I, I_C, I_W}, {RdBlkS,RdBlkM,RdBlk,CtoD}) {
2196 zz_recycleRequest;
2197 }
2198
2199 transition( VO_S, NoCPUWrite) {
2200 zz_recycleRequest;
2201 }
2202
2203 transition({BW_M, BW_S, BW_O, BW_E}, {RdBlkS,RdBlkM,RdBlk,CtoD,NoCPUWrite, CPUWrite}) {
2204 zz_recycleRequest;
2205 }
2206
2207 transition({BBB_M, BBB_S, BBB_E, BB_O, BB_M, BB_S, BB_OO}, { RdBlk, RdBlkS, RdBlkM, CPUWrite, NoCPUWrite}) {
2208 zz_recycleRequest;
2209 }
2210
2211 transition({BBB_S, BBB_E, BB_O, BB_S, BB_OO}, { CtoD}) {
2212 zz_recycleRequest;
2213 }
2214
2215 transition({BBS_UM, BBO_UM, BBM_M, BBM_O, BBS_M, BBO_M}, { RdBlk, RdBlkS, RdBlkM}) {
2216 zz_recycleRequest;
2217 }
2218
2219 transition(BBM_O, CtoD) {
2220 zz_recycleRequest;
2221 }
2222
2223 transition({BBS_S, BBO_O}, {RdBlkM, CtoD}) {
2224 zz_recycleRequest;
2225 }
2226
2227 transition({B_I, CP_I, CP_S, CP_O, CP_OM, CP_SM, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, {RdBlk, RdBlkS, RdBlkM}) {
2228 zz_recycleRequest;
2229 }
2230
2231 transition({CP_O, CP_S, CP_OM}, CtoD) {
2232 zz_recycleRequest;
2233 }
2234
2235 // Ignore replacement related messages after probe got in.
2236 transition({CP_I, B_I, CP_IOM, CP_ISM, CP_OSIW, BRWD_I, BRW_I, BRD_I}, {CPUWrite, NoCPUWrite}) {
2237 zz_recycleRequest;
2238 }
2239
2240 // Ignore replacement related messages after probes processed
2241 transition({I, I_S, I_ES, I_M, I_C, I_W}, {CPUWrite,NoCPUWrite}) {
2242 nC_sendNullWBAckToCore;
2243 i_popIncomingRequestQueue;
2244 }
2245 // cannot ignore cancel... otherwise TCP/SQC will be stuck in I_C
2246 transition({I, I_S, I_ES, I_M, I_C, I_W, S_M, M, O, E, S}, CPUWriteCancel){TagArrayRead} {
2247 nC_sendNullWBAckToCore;
2248 i_popIncomingRequestQueue;
2249 }
2250
// TCC victims in externally-invalidating states are stale; null-ack them.
2251 transition({CP_I, B_I, CP_IOM, CP_ISM, BRWD_I, BRW_I, BRD_I}, {NoVic, VicClean, VicDirty, VicDirtyLast}){
2252 nT_sendNullWBAckToTCC;
2253 pl_popTCCRequestQueue;
2254 }
2255
2256 // Handling Probes from NB (General process: (1) propagate up, go to blocking state (2) process acks (3) on last ack downward.)
2257
2258 // step 1
// PrbInvData: invalidate the locals, collect their data, then answer the
// NB with data and drop the block.
2259 transition({M, O, E, S}, PrbInvData, CP_I){TagArrayRead} {
2260 tp_allocateTBE;
2261 dc_probeInvCoreData;
2262 d2_probeInvL2Data;
2263 pp_popProbeQueue;
2264 }
2265 // step 2a
2266 transition(CP_I, CPUPrbResp) {
2267 y_writeDataToTBE;
2268 x_decrementAcks;
2269 o_checkForAckCompletion;
2270 pk_popResponseQueue;
2271 }
2272 // step 2b
2273 transition(CP_I, TCCPrbResp) {
2274 ty_writeTCCDataToTBE;
2275 x_decrementAcks;
2276 o_checkForAckCompletion;
2277 plr_popTCCResponseQueue;
2278 }
2279 // step 3
2280 transition(CP_I, ProbeAcksComplete, I){TagArrayWrite} {
2281 pd_sendProbeResponseData;
2282 c_clearOwner;
2283 cc_clearSharers;
2284 dt_deallocateTBE;
2285 dd_deallocateDir;
2286 pt_popTriggerQueue;
2287 }
2288
2289 // step 1
// PrbInv: same flow but no data is returned — invalidate-only probes.
2290 transition({M, O, E, S}, PrbInv, B_I){TagArrayWrite} {
2291 tp_allocateTBE;
2292 ipc_probeInvCore;
2293 i2_probeInvL2;
2294 pp_popProbeQueue;
2295 }
2296 // step 2
2297 transition(B_I, CPUPrbResp) {
2298 x_decrementAcks;
2299 o_checkForAckCompletion;
2300 pk_popResponseQueue;
2301 }
2302 // step 2b
2303 transition(B_I, TCCPrbResp) {
2304 x_decrementAcks;
2305 o_checkForAckCompletion;
2306 plr_popTCCResponseQueue;
2307 }
2308 // step 3
2309 transition(B_I, ProbeAcksComplete, I){TagArrayWrite} {
2310 // send response down to NB
2311 pi_sendProbeResponseInv;
2312 c_clearOwner;
2313 cc_clearSharers;
2314 dt_deallocateTBE;
2315 dd_deallocateDir;
2316 pt_popTriggerQueue;
2317 }
2318
2319
2320 // step 1
// PrbShrData: probe the locals for shared data; the block stays resident
// (ends in O or S) and the data is returned to the NB.
2321 transition({M, O}, PrbShrData, CP_O){TagArrayRead} {
2322 tp_allocateTBE;
2323 sc_probeShrCoreData;
2324 s2_probeShrL2Data;
2325 pp_popProbeQueue;
2326 }
2327
// From E the exclusive sharer is first promoted to owner.
2328 transition(E, PrbShrData, CP_O){TagArrayRead} {
2329 tp_allocateTBE;
2330 eto_moveExSharerToOwner;
2331 sc_probeShrCoreData;
2332 s2_probeShrL2Data;
2333 pp_popProbeQueue;
2334 }
2335 // step 2
2336 transition(CP_O, CPUPrbResp) {
2337 y_writeDataToTBE;
2338 x_decrementAcks;
2339 o_checkForAckCompletion;
2340 pk_popResponseQueue;
2341 }
2342 // step 2b
2343 transition(CP_O, TCCPrbResp) {
2344 ty_writeTCCDataToTBE;
2345 x_decrementAcks;
2346 o_checkForAckCompletion;
2347 plr_popTCCResponseQueue;
2348 }
2349 // step 3
2350 transition(CP_O, ProbeAcksComplete, O){TagArrayWrite} {
2351 // send response down to NB
2352 pd_sendProbeResponseData;
2353 dt_deallocateTBE;
2354 pt_popTriggerQueue;
2355 }
2356
2357 //step 1
2358 transition(S, PrbShrData, CP_S) {
2359 tp_allocateTBE;
2360 sc_probeShrCoreData;
2361 s2_probeShrL2Data;
2362 pp_popProbeQueue;
2363 }
2364 // step 2
2365 transition(CP_S, CPUPrbResp) {
2366 y_writeDataToTBE;
2367 x_decrementAcks;
2368 o_checkForAckCompletion;
2369 pk_popResponseQueue;
2370 }
2371 // step 2b
2372 transition(CP_S, TCCPrbResp) {
2373 ty_writeTCCDataToTBE;
2374 x_decrementAcks;
2375 o_checkForAckCompletion;
2376 plr_popTCCResponseQueue;
2377 }
2378 // step 3
2379 transition(CP_S, ProbeAcksComplete, S) {
2380 // send response down to NB
2381 pd_sendProbeResponseData;
2382 dt_deallocateTBE;
2383 pt_popTriggerQueue;
2384 }
2385
2386 // step 1
// Invalidating probe arrives while an upgrade (RdBlkM/CtoD) to the NB is
// outstanding (O_M / S_M): invalidate locals, answer the NB, and fall
// back to I_M so the pending upgrade can complete as a fresh fill.
2387 transition(O_M, PrbInvData, CP_IOM) {
2388 dc_probeInvCoreData;
2389 d2_probeInvL2Data;
2390 pp_popProbeQueue;
2391 }
2392 // step 2a
2393 transition(CP_IOM, CPUPrbResp) {
2394 y_writeDataToTBE;
2395 x_decrementAcks;
2396 o_checkForAckCompletion;
2397 pk_popResponseQueue;
2398 }
2399 // step 2b
2400 transition(CP_IOM, TCCPrbResp) {
2401 ty_writeTCCDataToTBE;
2402 x_decrementAcks;
2403 o_checkForAckCompletion;
2404 plr_popTCCResponseQueue;
2405 }
2406 // step 3
2407 transition(CP_IOM, ProbeAcksComplete, I_M) {
2408 pdm_sendProbeResponseDataMs;
2409 c_clearOwner;
2410 cc_clearSharers;
2411 cd_clearDirtyBitTBE;
2412 pt_popTriggerQueue;
2413 }
2414
// Reissue variant: the pending request was dropped, so release everything.
2415 transition(CP_IOM, ProbeAcksCompleteReissue, I){TagArrayWrite} {
2416 pdm_sendProbeResponseDataMs;
2417 c_clearOwner;
2418 cc_clearSharers;
2419 dt_deallocateTBE;
2420 dd_deallocateDir;
2421 pt_popTriggerQueue;
2422 }
2423
2424 // step 1
2425 transition(S_M, PrbInvData, CP_ISM) {
2426 dc_probeInvCoreData;
2427 d2_probeInvL2Data;
2428 o_checkForAckCompletion;
2429 pp_popProbeQueue;
2430 }
2431 // step 2a
2432 transition(CP_ISM, CPUPrbResp) {
2433 y_writeDataToTBE;
2434 x_decrementAcks;
2435 o_checkForAckCompletion;
2436 pk_popResponseQueue;
2437 }
2438 // step 2b
2439 transition(CP_ISM, TCCPrbResp) {
2440 ty_writeTCCDataToTBE;
2441 x_decrementAcks;
2442 o_checkForAckCompletion;
2443 plr_popTCCResponseQueue;
2444 }
2445 // step 3
2446 transition(CP_ISM, ProbeAcksComplete, I_M) {
2447 pdm_sendProbeResponseDataMs;
2448 c_clearOwner;
2449 cc_clearSharers;
2450 cd_clearDirtyBitTBE;
2451
2452 //dt_deallocateTBE;
2453 pt_popTriggerQueue;
2454 }
2455 transition(CP_ISM, ProbeAcksCompleteReissue, I){TagArrayWrite} {
2456 pim_sendProbeResponseInvMs;
2457 c_clearOwner;
2458 cc_clearSharers;
2459 dt_deallocateTBE;
2460 dd_deallocateDir;
2461 pt_popTriggerQueue;
2462 }
2463
2464 // step 1
2465 transition({S_M, O_M}, {PrbInv}, CP_ISM) {
2466 dc_probeInvCoreData;
2467 d2_probeInvL2Data;
2468 pp_popProbeQueue;
2469 }
2470 // next steps inherited from BS_ISM
2471
2472 // Simpler cases
2473
// Block already leaving (or never filled): ack the probe immediately.
2474 transition({I_C, I_W}, {PrbInvData, PrbInv, PrbShrData}) {
2475 pi_sendProbeResponseInv;
2476 pp_popProbeQueue;
2477 }
2478
2479 //If the directory is certain that the block is not present, one can send an acknowledgement right away.
2480 // No need for three step process.
2481 transition(I, {PrbInv,PrbShrData,PrbInvData}){TagArrayRead} {
2482 pi_sendProbeResponseInv;
2483 pp_popProbeQueue;
2484 }
2485
2486 transition({I_M, I_ES, I_S}, {PrbInv, PrbInvData}) {
2487 pi_sendProbeResponseInv;
2488 pp_popProbeQueue;
2489 }
2490
2491 transition({I_M, I_ES, I_S}, PrbShrData) {
2492 prm_sendProbeResponseMiss;
2493 pp_popProbeQueue;
2494 }
2495
2496 //step 1
// PrbShrData racing with an outstanding upgrade: collect shared data and
// return it; the upgrade state (S_M / O_M) is preserved.
2497 transition(S_M, PrbShrData, CP_SM) {
2498 sc_probeShrCoreData;
2499 s2_probeShrL2Data;
2500 o_checkForAckCompletion;
2501 pp_popProbeQueue;
2502 }
2503 // step 2
2504 transition(CP_SM, CPUPrbResp) {
2505 y_writeDataToTBE;
2506 x_decrementAcks;
2507 o_checkForAckCompletion;
2508 pk_popResponseQueue;
2509 }
2510 // step 2b
2511 transition(CP_SM, TCCPrbResp) {
2512 ty_writeTCCDataToTBE;
2513 x_decrementAcks;
2514 o_checkForAckCompletion;
2515 plr_popTCCResponseQueue;
2516 }
2517 // step 3
2518 transition(CP_SM, {ProbeAcksComplete,ProbeAcksCompleteReissue}, S_M){DataArrayRead} {
2519 // send response down to NB
2520 pd_sendProbeResponseData;
2521 pt_popTriggerQueue;
2522 }
2523
2524 //step 1
2525 transition(O_M, PrbShrData, CP_OM) {
2526 sc_probeShrCoreData;
2527 s2_probeShrL2Data;
2528 pp_popProbeQueue;
2529 }
2530 // step 2
2531 transition(CP_OM, CPUPrbResp) {
2532 y_writeDataToTBE;
2533 x_decrementAcks;
2534 o_checkForAckCompletion;
2535 pk_popResponseQueue;
2536 }
2537 // step 2b
2538 transition(CP_OM, TCCPrbResp) {
2539 ty_writeTCCDataToTBE;
2540 x_decrementAcks;
2541 o_checkForAckCompletion;
2542 plr_popTCCResponseQueue;
2543 }
2544 // step 3
2545 transition(CP_OM, {ProbeAcksComplete,ProbeAcksCompleteReissue}, O_M) {
2546 // send response down to NB
2547 pd_sendProbeResponseData;
2548 pt_popTriggerQueue;
2549 }
2550
// Probes racing with victim writebacks in flight (V* / BRW_I states):
// answer the probe from the buffered victim data and mark the writeback
// path stale (I_C / I_W) so the eventual NB_AckWB is handled accordingly.
2551 transition(BRW_I, PrbInvData, I_W) {
2552 pd_sendProbeResponseData;
2553 pp_popProbeQueue;
2554 }
2555
2556 transition({VM_I,VO_I}, PrbInvData, I_C) {
2557 pd_sendProbeResponseData;
2558 pp_popProbeQueue;
2559 }
2560
2561 transition(VES_I, {PrbInvData,PrbInv}, I_C) {
2562 pi_sendProbeResponseInv;
2563 pp_popProbeQueue;
2564 }
2565
2566 transition({VM_I, VO_I, BRW_I}, PrbInv, I_W) {
2567 pi_sendProbeResponseInv;
2568 pp_popProbeQueue;
2569 }
2570
// Shared probe does not cancel the writeback; flip the shared flag and stay.
2571 transition({VM_I, VO_I, VO_S, VES_I, BRW_I}, PrbShrData) {
2572 pd_sendProbeResponseData;
2573 sf_setSharedFlip;
2574 pp_popProbeQueue;
2575 }
2576
// VO_S still has local sharers, so an invalidating probe must first probe
// them out (CP_OSIW) before the response goes down.
2577 transition(VO_S, PrbInvData, CP_OSIW) {
2578 dc_probeInvCoreData;
2579 d2_probeInvL2Data;
2580 pp_popProbeQueue;
2581 }
2582
2583 transition(CP_OSIW, TCCPrbResp) {
2584 x_decrementAcks;
2585 o_checkForAckCompletion;
2586 plr_popTCCResponseQueue;
2587 }
2588 transition(CP_OSIW, CPUPrbResp) {
2589 x_decrementAcks;
2590 o_checkForAckCompletion;
2591 pk_popResponseQueue;
2592 }
2593
2594 transition(CP_OSIW, ProbeAcksComplete, I_C) {
2595 pd_sendProbeResponseData;
2596 cd_clearDirtyBitTBE;
2597 pt_popTriggerQueue;
2598 }
2599
// StaleVic from the TCC is always answered with a null writeback ack,
// regardless of current state.
2600 transition({I, S, E, O, M, CP_O, CP_S, CP_OM, CP_SM, CP_OSIW, BW_S, BW_E, BW_O, BW_M, I_M, I_ES, I_S, BBS_S, BBO_O, BBM_M, BBM_O, BB_M, BB_O, BB_OO, BB_S, BBS_M, BBO_M, BBO_UM, BBS_UM, S_M, O_M, BBB_S, BBB_M, BBB_E, VES_I, VM_I, VO_I, VO_S, ES_I, MO_I, I_C, I_W}, StaleVic) {
2601 nT_sendNullWBAckToTCC;
2602 pl_popTCCRequestQueue;
2603 }
2604
2605 transition({CP_I, B_I, CP_IOM, CP_ISM, BRWD_I, BRW_I, BRD_I}, StaleVic) {
2606 nT_sendNullWBAckToTCC;
2607 pl_popTCCRequestQueue;
2608 }
2609
2610 // Recall Transitions
2611 // transient states still require the directory state
// Directory-entry recall (capacity eviction of the TCCdir entry): victim
// the block to the NB and probe-invalidate all local holders, then drain
// both the probe acks and the NB writeback ack before freeing the entry.
2612 transition({M, O}, Recall, BRWD_I) {
2613 tr_allocateTBE;
2614 vd_victim;
2615 dc_probeInvCoreData;
2616 d2_probeInvL2Data;
2617 }
2618
2619 transition({E, S}, Recall, BRWD_I) {
2620 tr_allocateTBE;
2621 vc_victim;
2622 dc_probeInvCoreData;
2623 d2_probeInvL2Data;
2624 }
2625
// Nothing cached: just free the directory entry.
2626 transition(I, Recall) {
2627 dd_deallocateDir;
2628 }
2629
2630 transition({BRWD_I, BRD_I}, CPUPrbResp) {
2631 y_writeDataToTBE;
2632 x_decrementAcks;
2633 o_checkForAckCompletion;
2634 pk_popResponseQueue;
2635 }
2636
2637 transition({BRWD_I, BRD_I}, TCCPrbResp) {
2638 ty_writeTCCDataToTBE;
2639 x_decrementAcks;
2640 o_checkForAckCompletion;
2641 plr_popTCCResponseQueue;
2642 }
2643
// BRWD_I waits on both the NB ack and probe acks, in either order:
// BRWD_I -> BRD_I (ack first) or BRWD_I -> BRW_I (probes first) -> I.
2644 transition(BRWD_I, NB_AckWB, BRD_I) {
2645 pR_popResponseFromNBQueue;
2646 }
2647
2648 transition(BRWD_I, ProbeAcksComplete, BRW_I) {
2649 pt_popTriggerQueue;
2650 }
2651
2652 transition(BRW_I, NB_AckWB, I) {
2653 wb_data;
2654 dt_deallocateTBE;
2655 dd_deallocateDir;
2656 pR_popResponseFromNBQueue;
2657 }
2658
2659 transition(BRD_I, ProbeAcksComplete, I) {
2660 wb_data;
2661 dt_deallocateTBE;
2662 dd_deallocateDir;
2663 pt_popTriggerQueue;
2664 }
2665
2666 // wait for stable state for Recall
2667 transition({BRWD_I,BRD_I,BRW_I,CP_O, CP_S, CP_OM, CP_SM, CP_OSIW, BW_S, BW_E, BW_O, BW_M, I_M, I_ES, I_S, BBS_S, BBO_O, BBM_M, BBM_O, BB_M, BB_O, BB_OO, BB_S, BBS_M, BBO_M, BBO_UM, BBS_UM, S_M, O_M, BBB_S, BBB_M, BBB_E, VES_I, VM_I, VO_I, VO_S, ES_I, MO_I, I_C, I_W, CP_I}, Recall) {
2668 zz_recycleRequest; // stall and wait would be for the wrong address
2669 ut_updateTag; // try to find an easier recall
2670 }
2671
2672 }