// src/mem/protocol/MOESI_AMD_Base-RegionDir.sm
/*
 * Copyright (c) 2012-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Jason Power
 */

machine(MachineType:RegionDir, "Region Directory for AMD_Base-like protocol")
: CacheMemory *cacheMemory; // stores only region addresses. Must set block size same as below
  NodeID cpuRegionBufferNum;
  NodeID gpuRegionBufferNum;
  int blocksPerRegion := 64; // 4k regions
  Cycles toDirLatency := 10; // Latency to fwd requests and send invs to directory
  bool always_migrate := "False";
  bool sym_migrate := "False";
  bool asym_migrate := "False";
  bool noTCCdir := "False";
  int TCC_select_num_bits := 1;

  // To the directory
  MessageBuffer * requestToDir, network="To", virtual_network="5", vnet_type="request";

  // To the region buffers
  MessageBuffer * notifyToRBuffer, network="To", virtual_network="7", vnet_type="request";
  MessageBuffer * probeToRBuffer, network="To", virtual_network="8", vnet_type="request";

  // From the region buffers
  MessageBuffer * responseFromRBuffer, network="From", virtual_network="2", vnet_type="response";
  MessageBuffer * requestFromRegBuf, network="From", virtual_network="0", vnet_type="request";

  MessageBuffer * triggerQueue;
{

  // States
  state_declaration(State, desc="Region states", default="RegionDir_State_NP") {
    NP, AccessPermission:Invalid, desc="Not present in region directory";
    P, AccessPermission:Invalid, desc="Region is private to owner";
    S, AccessPermission:Invalid, desc="Region is shared between CPU and GPU";

    P_NP, AccessPermission:Invalid, desc="Evicting the region";
    NP_P, AccessPermission:Invalid, desc="Must wait for ack from R-buf";
    NP_S, AccessPermission:Invalid, desc="Must wait for ack from R-buf";
    P_P, AccessPermission:Invalid, desc="Waiting for ack from R-buf";
    S_S, AccessPermission:Invalid, desc="Waiting for ack from R-buf";
    P_S, AccessPermission:Invalid, desc="Downgrading the region";
    S_P, AccessPermission:Invalid, desc="Upgrading the region";
    P_AS, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
    S_AP, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";
    P_AP, AccessPermission:Invalid, desc="Sent invalidates, waiting for acks";

    SP_NP_W, AccessPermission:Invalid, desc="Last sharer writing back, waiting for ack";
    S_W, AccessPermission:Invalid, desc="Sharer writing back, waiting for ack";

    P_AP_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
    P_AS_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
    S_AP_W, AccessPermission:Invalid, desc="Fwded request to dir, waiting for ack";
  }
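
  // Naming of the transient states above: a state of the form X_Y is moving
  // from X to Y and is waiting on an ack from a region buffer; P_AS, S_AP and
  // P_AP have region probes outstanding and are collecting acks; P_AP_W,
  // P_AS_W and S_AP_W have forwarded the demand request to the directory and
  // wait for its DirReadyAck before the probes are triggered.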

  enumeration(Event, desc="Region directory events") {
    SendInv, desc="Send inv message to any machine that has a region buffer";
    SendUpgrade, desc="Send upgrade message to any machine that has a region buffer";
    SendDowngrade, desc="Send downgrade message to any machine that has a region buffer";

    Evict, desc="Evict this region";

    UpgradeRequest, desc="Request from r-buf for an upgrade";
    SharedRequest, desc="Request from r-buf for read";
    PrivateRequest, desc="Request from r-buf for write";

    InvAckCore, desc="Ack from region buffer to order the invalidate";
    InvAckCoreNoShare, desc="Ack from region buffer to order the invalidate, and it does not have the region";
    CPUPrivateAck, desc="Ack from region buffer to order private notification";

    LastAck, desc="Done evicting all the blocks";

    StaleCleanWbRequest, desc="stale clean writeback request";
    StaleCleanWbRequestNoShare, desc="stale clean wb req from a cache which should be removed from sharers";
    CleanWbRequest, desc="clean writeback request, multiple sharers";
    CleanWbRequest_LastSharer, desc="clean writeback request, last sharer";
    WritebackAck, desc="Writeback Ack from region buffer";
    DirReadyAck, desc="Directory is ready, waiting for ack from region buffer";

    TriggerInv, desc="trigger invalidate message";
    TriggerDowngrade, desc="trigger downgrade message";
  }

  enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
    DataArrayRead, desc="Read the data array";
    DataArrayWrite, desc="Write the data array";
    TagArrayRead, desc="Read the tag array";
    TagArrayWrite, desc="Write the tag array";
  }

  structure(BoolVec, external="yes") {
    bool at(int);
    void resize(int);
    void clear();
  }

  structure(Entry, desc="Region entry", interface="AbstractCacheEntry") {
    Addr addr, desc="Base address of this region";
    NetDest Sharers, desc="Set of machines that are sharing, but not owners";
    State RegionState, desc="Region state";
    DataBlock DataBlk, desc="Data for the block (always empty in region dir)";
    MachineID Owner, desc="Machine which owns all blocks in this region";
    Cycles ProbeStart, desc="Time when the first probe request was issued";
    bool LastWriten, default="false", desc="Whether the most recent access to this region was a write";
    bool LastWritenByCpu, default="false", desc="Whether the CPU's most recent access to this region was a write";
    bool LastWritenByGpu, default="false", desc="Whether the GPU's most recent access to this region was a write";
  }

  structure(TBE, desc="Transaction buffer entry for an in-flight region operation") {
    State TBEState, desc="Transient state";
    MachineID Owner, desc="Machine which owns all blocks in this region";
    NetDest Sharers, desc="Set of machines to send evicts";
    int NumValidBlocks, desc="Number of blocks valid so we don't have to count a BoolVec";
    bool AllAcksReceived, desc="Got all necessary acks from dir";
    CoherenceRequestType MsgType, desc="Msg type for the evicts could be inv or dwngrd";
    Cycles ProbeRequestTime, default="Cycles(0)", desc="Start of probe request";
    Cycles InitialRequestTime, default="Cycles(0)", desc="To forward back on out msg";
    Addr DemandAddress, desc="Demand address from original request";
    uint64_t probe_id, desc="probe id for lifetime profiling";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  // Stores only region addresses
  TBETable TBEs, template="<RegionDir_TBE>", constructor="m_number_of_TBEs";
  int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Tick clockEdge();
  Tick cyclesToTicks(Cycles c);

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void wakeUpAllBuffers();
  void wakeUpBuffers(Addr a);
  Cycles curCycle();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);

  int blockBits, default="RubySystem::getBlockSizeBits()";
  int blockBytes, default="RubySystem::getBlockSizeBytes()";
  int regionBits, default="log2(m_blocksPerRegion)";
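
  // A region covers blocksPerRegion consecutive cache blocks, so an address
  // splits into [ region base | region offset (regionBits) | block offset
  // (blockBits) ]. getRegionBase() below masks off the low
  // (blockBits + regionBits) bits, and getRegionOffset() extracts the block
  // index within the region.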

  // Functions

  MachineID getCoreMachine(MachineID rBuf, Addr address) {
    if (machineIDToNodeID(rBuf) == cpuRegionBufferNum) {
      return createMachineID(MachineType:CorePair, intToID(0));
    } else if (machineIDToNodeID(rBuf) == gpuRegionBufferNum) {
      if (noTCCdir) {
        return mapAddressToRange(address, MachineType:TCC,
                                 TCC_select_low_bit, TCC_select_num_bits);
      } else {
        return createMachineID(MachineType:TCCdir, intToID(0));
      }
    } else {
      error("Unexpected region buffer number");
    }
  }

  bool isCpuMachine(MachineID rBuf) {
    if (machineIDToNodeID(rBuf) == cpuRegionBufferNum) {
      return true;
    } else if (machineIDToNodeID(rBuf) == gpuRegionBufferNum) {
      return false;
    } else {
      error("Unexpected region buffer number");
    }
  }

  bool symMigrate(Entry cache_entry) {
    return cache_entry.LastWriten;
  }

  bool asymMigrate(Entry cache_entry, MachineID requestor) {
    if (isCpuMachine(requestor)) {
      return cache_entry.LastWritenByCpu;
    } else {
      return cache_entry.LastWritenByGpu;
    }
  }

  int getRegionOffset(Addr addr) {
    if (blocksPerRegion > 1) {
      Addr offset := bitSelect(addr, blockBits, regionBits+blockBits-1);
      int ret := addressToInt(offset);
      assert(ret < blocksPerRegion);
      return ret;
    } else {
      return 0;
    }
  }

  Addr getRegionBase(Addr addr) {
    return maskLowOrderBits(addr, blockBits+regionBits);
  }

  Addr getNextBlock(Addr addr) {
    Addr a := addr;
    makeNextStrideAddress(a, 1);
    return a;
  }

  bool presentOrAvail(Addr addr) {
    DPRINTF(RubySlicc, "Present? %s, avail? %s\n", cacheMemory.isTagPresent(getRegionBase(addr)), cacheMemory.cacheAvail(getRegionBase(addr)));
    return cacheMemory.isTagPresent(getRegionBase(addr)) || cacheMemory.cacheAvail(getRegionBase(addr));
  }

  // Returns a region entry!
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(getRegionBase(addr)));
  }

  TBE getTBE(Addr addr), return_by_pointer="yes" {
    return TBEs.lookup(getRegionBase(addr));
  }

  DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
    return getCacheEntry(getRegionBase(addr)).DataBlk;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.RegionState;
    }
    return State:NP;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }
    if (is_valid(cache_entry)) {
      cache_entry.RegionState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := getTBE(addr);
    if (is_valid(tbe)) {
      return RegionDir_State_to_permission(tbe.TBEState);
    }
    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return RegionDir_State_to_permission(cache_entry.RegionState);
    }
    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(RegionDir_State_to_permission(state));
    }
  }

  void functionalRead(Addr addr, Packet *pkt) {
    functionalMemoryRead(pkt);
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    if (functionalMemoryWrite(pkt)) {
      return 1;
    } else {
      return 0;
    }
  }

  void recordRequestType(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      cacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      cacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      cacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      cacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
    }
  }

  bool checkResourceAvailable(RequestType request_type, Addr addr) {
    if (request_type == RequestType:DataArrayRead) {
      return cacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:DataArrayWrite) {
      return cacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr);
    } else if (request_type == RequestType:TagArrayRead) {
      return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else if (request_type == RequestType:TagArrayWrite) {
      return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
    } else {
      error("Invalid RequestType type in checkResourceAvailable");
      return true;
    }
  }

  out_port(triggerQueue_out, TriggerMsg, triggerQueue);

  out_port(requestNetwork_out, CPURequestMsg, requestToDir);
  out_port(notifyNetwork_out, CPURequestMsg, notifyToRBuffer);
  out_port(probeNetwork_out, NBProbeRequestMsg, probeToRBuffer);

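  // The in_ports below are ranked so that internal trigger messages (rank=2)
  // and responses from the region buffers (rank=1) take priority over new
  // requests (rank=0), letting in-flight transactions make progress before
  // fresh work is admitted.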
  in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=2) {
    if (triggerQueue_in.isReady(clockEdge())) {
      peek(triggerQueue_in, TriggerMsg) {
        assert(in_msg.addr == getRegionBase(in_msg.addr));
        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := getTBE(in_msg.addr);
        DPRINTF(RubySlicc, "trigger msg: %s (%s)\n", in_msg, getRegionBase(in_msg.addr));
        if (in_msg.Type == TriggerType:AcksComplete) {
          assert(is_valid(tbe));
          trigger(Event:LastAck, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:InvRegion) {
          assert(is_valid(tbe));
          trigger(Event:TriggerInv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == TriggerType:DowngradeRegion) {
          assert(is_valid(tbe));
          trigger(Event:TriggerDowngrade, in_msg.addr, cache_entry, tbe);
        } else {
          error("Unknown trigger message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseFromRBuffer, rank=1) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        TBE tbe := getTBE(in_msg.addr);
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceResponseType:CPUPrbResp) {
          assert(in_msg.addr == getRegionBase(in_msg.addr));
          assert(is_valid(tbe));
          if (in_msg.NotCached) {
            trigger(Event:InvAckCoreNoShare, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:InvAckCore, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:PrivateAck) {
          assert(in_msg.addr == getRegionBase(in_msg.addr));
          assert(is_valid(cache_entry));
          //Fix Me...add back in: assert(cache_entry.Sharers.isElement(in_msg.Sender));
          trigger(Event:CPUPrivateAck, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:RegionWbAck) {
          //Fix Me...add back in: assert(cache_entry.Sharers.isElement(in_msg.Sender) == false);
          assert(in_msg.addr == getRegionBase(in_msg.addr));
          trigger(Event:WritebackAck, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceResponseType:DirReadyAck) {
          assert(is_valid(tbe));
          trigger(Event:DirReadyAck, getRegionBase(in_msg.addr), cache_entry, tbe);
        } else {
          error("Invalid response type");
        }
      }
    }
  }

  // In from cores
  // NOTE: We get the cache / TBE entry based on the region address,
  // but pass the block address to the actions
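  // On a SharedRequest the region may instead be migrated to the requestor as
  // private: always when always_migrate is set, or when the sym_migrate /
  // asym_migrate heuristics see that the region was last written (overall, or
  // by the requestor's side, respectively). Otherwise the region is simply
  // shared or downgraded.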
  in_port(requestNetwork_in, CPURequestMsg, requestFromRegBuf, rank=0) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, CPURequestMsg) {
        //assert(in_msg.addr == getRegionBase(in_msg.addr));
        Addr address := getRegionBase(in_msg.addr);
        DPRINTF(RubySlicc, "Got %s, base %s\n", in_msg.addr, address);
        if (presentOrAvail(address)) {
          TBE tbe := getTBE(address);
          Entry cache_entry := getCacheEntry(address);
          if (in_msg.Type == CoherenceRequestType:PrivateRequest) {
            if (is_valid(cache_entry) && (cache_entry.Owner != in_msg.Requestor ||
                getState(tbe, cache_entry, address) == State:S)) {
              trigger(Event:SendInv, address, cache_entry, tbe);
            } else {
              trigger(Event:PrivateRequest, address, cache_entry, tbe);
            }
          } else if (in_msg.Type == CoherenceRequestType:SharedRequest) {
            if (is_invalid(cache_entry)) {
              // If no one has ever requested this region give private permissions
              trigger(Event:PrivateRequest, address, cache_entry, tbe);
            } else {
              if (always_migrate ||
                  (sym_migrate && symMigrate(cache_entry)) ||
                  (asym_migrate && asymMigrate(cache_entry, in_msg.Requestor))) {
                if (cache_entry.Sharers.count() == 1 &&
                    cache_entry.Sharers.isElement(in_msg.Requestor)) {
                  trigger(Event:UpgradeRequest, address, cache_entry, tbe);
                } else {
                  trigger(Event:SendInv, address, cache_entry, tbe);
                }
              } else { // don't migrate
                if (cache_entry.Sharers.isElement(in_msg.Requestor) ||
                    getState(tbe, cache_entry, address) == State:S) {
                  trigger(Event:SharedRequest, address, cache_entry, tbe);
                } else {
                  trigger(Event:SendDowngrade, address, cache_entry, tbe);
                }
              }
            }
          } else if (in_msg.Type == CoherenceRequestType:UpgradeRequest) {
            if (is_invalid(cache_entry)) {
              trigger(Event:PrivateRequest, address, cache_entry, tbe);
            } else if (cache_entry.Sharers.count() == 1 && cache_entry.Sharers.isElement(in_msg.Requestor)) {
              trigger(Event:UpgradeRequest, address, cache_entry, tbe);
            } else {
              trigger(Event:SendUpgrade, address, cache_entry, tbe);
            }
          } else if (in_msg.Type == CoherenceRequestType:CleanWbRequest) {
            if (is_invalid(cache_entry) || cache_entry.Sharers.isElement(in_msg.Requestor) == false) {
              trigger(Event:StaleCleanWbRequest, address, cache_entry, tbe);
            } else {
              DPRINTF(RubySlicc, "wb address %s(%s) owner %s sharers %s requestor %s %d %d\n", in_msg.addr, getRegionBase(in_msg.addr), cache_entry.Owner, cache_entry.Sharers, in_msg.Requestor, cache_entry.Sharers.isElement(in_msg.Requestor), cache_entry.Sharers.count());
              if (cache_entry.Sharers.isElement(in_msg.Requestor) && cache_entry.Sharers.count() == 1) {
                DPRINTF(RubySlicc, "last wb\n");
                trigger(Event:CleanWbRequest_LastSharer, address, cache_entry, tbe);
              } else {
                DPRINTF(RubySlicc, "clean wb\n");
                trigger(Event:CleanWbRequest, address, cache_entry, tbe);
              }
            }
          } else {
            error("unknown region dir request type");
          }
        } else {
          Addr victim := cacheMemory.cacheProbe(getRegionBase(in_msg.addr));
          TBE victim_tbe := getTBE(victim);
          Entry victim_entry := getCacheEntry(victim);
          DPRINTF(RubySlicc, "Evicting address %s for new region at address %s(%s)\n", victim, in_msg.addr, getRegionBase(in_msg.addr));
          assert(is_valid(victim_entry));
          trigger(Event:Evict, victim, victim_entry, victim_tbe);
        }
      }
    }
  }

  // Actions

  action(f_fwdReqToDir, "f", desc="Forward CPU request to directory") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
        out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
        out_msg.Type := in_msg.OriginalType;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.Requestor := getCoreMachine(in_msg.Requestor, address);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
        out_msg.Shared := in_msg.Shared;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Private := in_msg.Private;
        out_msg.NoAckNeeded := true;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ProbeRequestStartTime := curCycle();
        out_msg.DemandRequest := true;
        if (is_valid(cache_entry) && getState(tbe, cache_entry, address) != State:S) {
          out_msg.Acks := cache_entry.Sharers.count();
        } else {
          out_msg.Acks := 0;
        }
      }
    }
  }

  action(f_fwdReqToDirShared, "fs", desc="Forward CPU request to directory (shared)") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
        out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
        out_msg.Type := in_msg.OriginalType;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.Requestor := getCoreMachine(in_msg.Requestor, address);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
        out_msg.Shared := in_msg.Shared;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Private := in_msg.Private;
        out_msg.NoAckNeeded := true;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ProbeRequestStartTime := curCycle();
        out_msg.DemandRequest := true;
        out_msg.ForceShared := true;
        if (is_valid(cache_entry) && getState(tbe, cache_entry, address) != State:S) {
          out_msg.Acks := cache_entry.Sharers.count();
        } else {
          out_msg.Acks := 0;
        }
      }
    }
  }

  action(f_fwdReqToDirWithAck, "fa", desc="Forward CPU request to directory with ack request") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
        out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
        out_msg.Type := in_msg.OriginalType;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.Requestor := getCoreMachine(in_msg.Requestor, address);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
        out_msg.Shared := in_msg.Shared;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Private := in_msg.Private;
        out_msg.NoAckNeeded := false;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ProbeRequestStartTime := curCycle();
        out_msg.DemandRequest := true;
        if (is_valid(cache_entry)) {
          out_msg.Acks := cache_entry.Sharers.count();
          // Don't need an ack from the requestor!
          if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
            out_msg.Acks := out_msg.Acks - 1;
          }
        } else {
          out_msg.Acks := 0;
        }
      }
    }
  }

  action(f_fwdReqToDirWithAckShared, "fas", desc="Forward CPU request to directory with ack request (shared)") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
        out_msg.addr := in_msg.addr; // This is the block address. "address" is the region address
        out_msg.Type := in_msg.OriginalType;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.Requestor := getCoreMachine(in_msg.Requestor, address);
        out_msg.WTRequestor := in_msg.WTRequestor;
        out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
        out_msg.Shared := in_msg.Shared;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Private := in_msg.Private;
        out_msg.NoAckNeeded := false;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.ProbeRequestStartTime := curCycle();
        out_msg.DemandRequest := true;
        out_msg.ForceShared := true;
        if (is_valid(cache_entry)) {
          out_msg.Acks := cache_entry.Sharers.count();
          // Don't need an ack from the requestor!
          if (cache_entry.Sharers.isElement(in_msg.Requestor)) {
            out_msg.Acks := out_msg.Acks - 1;
          }
        } else {
          out_msg.Acks := 0;
        }
      }
    }
  }

  action(a_allocateRegionEntry, "a", desc="Allocate a new entry") {
    set_cache_entry(cacheMemory.allocate(getRegionBase(address), new Entry));
    peek(requestNetwork_in, CPURequestMsg) {
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
    }
  }

  action(d_deallocateRegionEntry, "d", desc="Deallocate region entry") {
    cacheMemory.deallocate(getRegionBase(address));
    unset_cache_entry();
  }

  action(ra_receiveAck, "ra", desc="Mark TBE entry as received this ack") {
    //assert(tbe.ValidBlocks.at(getRegionOffset(address)));
    DPRINTF(RubySlicc, "received ack for %s reg: %s\n", address, getRegionBase(address));
    tbe.NumValidBlocks := tbe.NumValidBlocks - 1;
    assert(tbe.NumValidBlocks >= 0);
    if (tbe.NumValidBlocks == 0) {
      tbe.AllAcksReceived := true;
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.Type := TriggerType:AcksComplete;
        out_msg.addr := address;
      }
    }
    APPEND_TRANSITION_COMMENT(getRegionBase(address));
    APPEND_TRANSITION_COMMENT(" Acks left receive ");
    APPEND_TRANSITION_COMMENT(tbe.NumValidBlocks);
  }

  action(ca_checkAcks, "ca", desc="Check to see if we need more acks") {
    if (tbe.NumValidBlocks == 0) {
      tbe.AllAcksReceived := true;
      enqueue(triggerQueue_out, TriggerMsg, 1) {
        out_msg.Type := TriggerType:AcksComplete;
        out_msg.addr := address;
      }
    }
  }

  action(ti_triggerInv, "ti", desc="Queue an invalidate trigger for this region") {
    enqueue(triggerQueue_out, TriggerMsg, 1) {
      out_msg.Type := TriggerType:InvRegion;
      out_msg.addr := address;
    }
  }

  action(td_triggerDowngrade, "td", desc="Queue a downgrade trigger for this region") {
    enqueue(triggerQueue_out, TriggerMsg, 1) {
      out_msg.Type := TriggerType:DowngradeRegion;
      out_msg.addr := address;
    }
  }

  action(t_allocateTBE, "t", desc="allocate TBE Entry") {
    check_allocate(TBEs);
    TBEs.allocate(getRegionBase(address));
    set_tbe(getTBE(address));
    if (is_valid(cache_entry)) {
      tbe.Owner := cache_entry.Owner;
      tbe.Sharers := cache_entry.Sharers;
      tbe.AllAcksReceived := true; // assume no acks are required
    }
    tbe.ProbeRequestTime := curCycle();
    peek(requestNetwork_in, CPURequestMsg) {
      tbe.InitialRequestTime := in_msg.InitialRequestTime;
      tbe.DemandAddress := in_msg.addr;
    }
    APPEND_TRANSITION_COMMENT(getRegionBase(address));
    APPEND_TRANSITION_COMMENT(" Acks left ");
    APPEND_TRANSITION_COMMENT(tbe.NumValidBlocks);
    APPEND_TRANSITION_COMMENT(" Owner, ");
    APPEND_TRANSITION_COMMENT(tbe.Owner);
    APPEND_TRANSITION_COMMENT(" sharers, ");
    APPEND_TRANSITION_COMMENT(tbe.Sharers);
  }

  action(ss_setSharers, "ss", desc="Add requestor to sharers") {
    peek(requestNetwork_in, CPURequestMsg) {
      cache_entry.Sharers.add(in_msg.Requestor);
      APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
    }
  }

  action(rs_removeSharer, "rs", desc="Remove requestor from sharers") {
    peek(requestNetwork_in, CPURequestMsg) {
      cache_entry.Sharers.remove(in_msg.Requestor);
      APPEND_TRANSITION_COMMENT(" removing ");
      APPEND_TRANSITION_COMMENT(in_msg.Requestor);
      APPEND_TRANSITION_COMMENT(" sharers ");
      APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
    }
  }

  action(rsr_removeSharerResponse, "rsr", desc="Remove responding sharer from sharers") {
    peek(responseNetwork_in, ResponseMsg) {
      cache_entry.Sharers.remove(in_msg.Sender);
      APPEND_TRANSITION_COMMENT(cache_entry.Sharers);
    }
  }

  action(cs_clearSharers, "cs", desc="Clear the sharers list") {
    cache_entry.Sharers.clear();
  }

  action(so_setOwner, "so", desc="Set the owner to the requestor") {
    peek(requestNetwork_in, CPURequestMsg) {
      cache_entry.Owner := in_msg.Requestor;
      APPEND_TRANSITION_COMMENT(" Owner now: ");
      APPEND_TRANSITION_COMMENT(cache_entry.Owner);
    }
  }

  action(rr_removeRequestorFromTBE, "rr", desc="Remove requestor from TBE sharers") {
    peek(requestNetwork_in, CPURequestMsg) {
      tbe.Sharers.remove(in_msg.Requestor);
    }
  }

  action(ur_updateDirtyStatusOnRequest, "ur", desc="Update dirty status on demand request") {
    peek(requestNetwork_in, CPURequestMsg) {
      if (is_valid(cache_entry)) {
        if ((in_msg.Type == CoherenceRequestType:SharedRequest) &&
            (cache_entry.Sharers.isElement(in_msg.Requestor) == false)) {
          cache_entry.LastWriten := false;
          if (isCpuMachine(in_msg.Requestor)) {
            cache_entry.LastWritenByCpu := false;
          } else {
            cache_entry.LastWritenByGpu := false;
          }
        } else if ((in_msg.Type == CoherenceRequestType:PrivateRequest) ||
                   (in_msg.Type == CoherenceRequestType:UpgradeRequest)) {
          cache_entry.LastWriten := true;
          if (isCpuMachine(in_msg.Requestor)) {
            cache_entry.LastWritenByCpu := true;
          } else {
            cache_entry.LastWritenByGpu := true;
          }
        }
      }
    }
  }

  action(ud_updateDirtyStatusWithWb, "ud", desc="Update dirty status on writeback") {
    peek(requestNetwork_in, CPURequestMsg) {
      if (is_valid(cache_entry) && in_msg.Dirty) {
        cache_entry.LastWriten := true;
        if (isCpuMachine(in_msg.Requestor)) {
          cache_entry.LastWritenByCpu := true;
        } else {
          cache_entry.LastWritenByGpu := true;
        }
      }
    }
  }

  action(sns_setNumAcksSharers, "sns", desc="Set number of acks to one per shared region buffer") {
    assert(is_valid(tbe));
    assert(is_valid(cache_entry));
    tbe.NumValidBlocks := tbe.Sharers.count();
  }

  action(sno_setNumAcksOne, "sno", desc="Set number of acks to one") {
    assert(is_valid(tbe));
    assert(is_valid(cache_entry));
    tbe.NumValidBlocks := 1;
  }

  action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
    TBEs.deallocate(getRegionBase(address));
    APPEND_TRANSITION_COMMENT(" reg: ");
    APPEND_TRANSITION_COMMENT(getRegionBase(address));
    unset_tbe();
  }

  action(wb_sendWbNotice, "wb", desc="Send notice to cache that writeback is acknowledged") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(notifyNetwork_out, CPURequestMsg, 1) {
        out_msg.addr := getRegionBase(address);
        out_msg.Type := CoherenceRequestType:WbNotify;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
      }
    }
  }

  action(wbn_sendWbNoticeNoAck, "wbn", desc="Send notice to cache that writeback is acknowledged (no ack needed)") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(notifyNetwork_out, CPURequestMsg, 1) {
        out_msg.addr := getRegionBase(address);
        out_msg.Type := CoherenceRequestType:WbNotify;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
        out_msg.NoAckNeeded := true;
      }
    }
  }

  action(b_sendPrivateNotice, "b", desc="Send notice to private cache that it has private access") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(notifyNetwork_out, CPURequestMsg, 1) {
        out_msg.addr := getRegionBase(address);
        out_msg.Type := CoherenceRequestType:PrivateNotify;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
      }
    }
  }

  action(bs_sendSharedNotice, "bs", desc="Send notice to private cache that it has shared access") {
    peek(requestNetwork_in, CPURequestMsg) {
      enqueue(notifyNetwork_out, CPURequestMsg, 1) {
        out_msg.addr := getRegionBase(address);
        out_msg.Type := CoherenceRequestType:SharedNotify;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Requestor := machineID;
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.InitialRequestTime := in_msg.InitialRequestTime;
      }
    }
  }

  action(c_sendSharedNoticeToOrigReq, "c", desc="Send notice to private cache that it has shared access") {
    assert(is_valid(tbe));
    enqueue(notifyNetwork_out, CPURequestMsg, 1) {
      out_msg.addr := getRegionBase(address);
      out_msg.Type := CoherenceRequestType:SharedNotify;
      out_msg.Destination.add(tbe.Owner);
      out_msg.Requestor := machineID;
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestTime;
      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      APPEND_TRANSITION_COMMENT("dest: ");
      APPEND_TRANSITION_COMMENT(out_msg.Destination);
    }
  }

  action(sp_sendPrivateNoticeToOrigReq, "sp", desc="Send notice to private cache that it has private access") {
    assert(is_valid(tbe));
    enqueue(notifyNetwork_out, CPURequestMsg, 1) {
      out_msg.addr := getRegionBase(address);
      out_msg.Type := CoherenceRequestType:PrivateNotify;
      out_msg.Destination.add(tbe.Owner);
      out_msg.Requestor := machineID;
      out_msg.MessageSize := MessageSizeType:Request_Control;
      out_msg.ProbeRequestStartTime := tbe.ProbeRequestTime;
      out_msg.InitialRequestTime := tbe.InitialRequestTime;
      APPEND_TRANSITION_COMMENT("dest: ");
      APPEND_TRANSITION_COMMENT(out_msg.Destination);
    }
  }

  action(i_RegionInvNotify, "i", desc="Send notice to private cache that it no longer has private access") {
    enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
      out_msg.addr := address;
      out_msg.DemandAddress := tbe.DemandAddress;
      //out_msg.Requestor := tbe.Requestor;
      out_msg.Requestor := machineID;
      out_msg.Type := ProbeRequestType:PrbInv;
      //Fix me: assert(tbe.Sharers.count() > 0);
      out_msg.DemandRequest := true;
      out_msg.Destination := tbe.Sharers;
      out_msg.MessageSize := MessageSizeType:Request_Control;
      APPEND_TRANSITION_COMMENT("dest: ");
      APPEND_TRANSITION_COMMENT(out_msg.Destination);
    }
  }

  action(i0_RegionInvNotifyDemand0, "i0", desc="Send notice to private cache that it no longer has private access") {
    enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
      out_msg.addr := address;
      // Demand address should default to 0 -> out_msg.DemandAddress := 0;
      out_msg.Requestor := machineID;
      out_msg.Type := ProbeRequestType:PrbInv;
      out_msg.Destination := tbe.Sharers;
      out_msg.MessageSize := MessageSizeType:Request_Control;
      APPEND_TRANSITION_COMMENT("dest: ");
      APPEND_TRANSITION_COMMENT(out_msg.Destination);
    }
  }

  action(rd_RegionDowngrade, "rd", desc="Send notice to private cache that it only has shared access") {
    enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
      out_msg.addr := address;
      out_msg.DemandAddress := tbe.DemandAddress;
      out_msg.Requestor := machineID;
      out_msg.Type := ProbeRequestType:PrbDowngrade;
      out_msg.DemandRequest := true;
      out_msg.Destination := tbe.Sharers;
      out_msg.MessageSize := MessageSizeType:Request_Control;
      APPEND_TRANSITION_COMMENT("dest: ");
      APPEND_TRANSITION_COMMENT(out_msg.Destination);
    }
  }

  action(p_popRequestQueue, "p", desc="Pop the request queue") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(pt_popTriggerQueue, "pt", desc="Pop the trigger queue") {
    triggerQueue_in.dequeue(clockEdge());
  }

  action(pr_popResponseQueue, "pr", desc="Pop the response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(s_stallAndWaitRequest, "s", desc="Stall and wait on the region address") {
    Addr regAddr := getRegionBase(address);
    stall_and_wait(requestNetwork_in, regAddr);
  }

  action(w_wakeUpRegionDependents, "w", desc="Wake up any requests waiting for this region") {
    wakeUpBuffers(getRegionBase(address));
  }

  action(wa_wakeUpAllDependents, "wa", desc="Wake up any requests waiting for this region") {
    wakeUpAllBuffers();
  }

  action(zz_recycleRequestQueue, "\z", desc="Recycle the request queue") {
    requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
  }

  action(z_stall, "z", desc="stall request queue") {
    // fake state
  }

  action(mru_setMRU, "mru", desc="set MRU") {
    cacheMemory.setMRU(address);
  }

  // Transitions

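  // Common path: a request from a region buffer is forwarded to the directory
  // (f_*), the requestor is recorded as owner/sharer, and a private/shared
  // notify is sent back; the transaction completes when the region buffer
  // returns CPUPrivateAck. Paths that need probes (SendInv, SendUpgrade,
  // SendDowngrade) forward the request with an ack requirement, wait for
  // DirReadyAck, then send region probes and collect acks until LastAck fires
  // from the trigger queue; an Evict sends its invalidation probes directly.
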
  transition({NP_P, P_P, NP_S, S_S, S_P, P_S, P_NP, S_AP, P_AS, P_AP, SP_NP_W, S_W, P_AP_W, P_AS_W, S_AP_W},
             {PrivateRequest, SharedRequest, UpgradeRequest, SendInv, SendUpgrade, SendDowngrade,
              CleanWbRequest, CleanWbRequest_LastSharer, StaleCleanWbRequest}) {
    s_stallAndWaitRequest
  }

  transition({NP_P, P_P, NP_S, S_S, S_P, S_W, P_S, P_NP, S_AP, P_AS, P_AP, P_AP_W, P_AS_W, S_AP_W}, Evict) {
    zz_recycleRequestQueue;
  }

  transition(NP, {PrivateRequest, SendUpgrade}, NP_P) {TagArrayRead, TagArrayWrite} {
    a_allocateRegionEntry;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDir;
    b_sendPrivateNotice;
    so_setOwner;
    ss_setSharers;
    t_allocateTBE;
    p_popRequestQueue;
  }

  transition(P, {PrivateRequest, UpgradeRequest}, P_P) {TagArrayRead} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDir;
    b_sendPrivateNotice;
    t_allocateTBE;
    p_popRequestQueue;
  }

  transition({NP_P, P_P}, CPUPrivateAck, P) {
    dt_deallocateTBE;
    w_wakeUpRegionDependents;
    pr_popResponseQueue;
  }

  transition({NP, P, S}, StaleCleanWbRequest) {TagArrayRead, TagArrayWrite} {
    wbn_sendWbNoticeNoAck;
    ud_updateDirtyStatusWithWb;
    p_popRequestQueue;
  }

  transition(NP, SharedRequest, NP_S) {TagArrayRead, TagArrayWrite} {
    a_allocateRegionEntry;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirShared;
    bs_sendSharedNotice;
    so_setOwner;
    ss_setSharers;
    t_allocateTBE;
    p_popRequestQueue;
  }

  // Could probably do this in parallel with other shared requests
  transition(S, SharedRequest, S_S) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirShared;
    bs_sendSharedNotice;
    ss_setSharers;
    t_allocateTBE;
    p_popRequestQueue;
  }

  transition({P, S}, CleanWbRequest_LastSharer, SP_NP_W) {TagArrayRead, TagArrayWrite} {
    ud_updateDirtyStatusWithWb;
    wb_sendWbNotice;
    rs_removeSharer;
    t_allocateTBE;
    d_deallocateRegionEntry;
    p_popRequestQueue;
  }

  transition(S, CleanWbRequest, S_W) {TagArrayRead, TagArrayWrite} {
    ud_updateDirtyStatusWithWb;
    wb_sendWbNotice;
    rs_removeSharer;
    t_allocateTBE;
    p_popRequestQueue;
  }

  transition(SP_NP_W, WritebackAck, NP) {
    dt_deallocateTBE;
    w_wakeUpRegionDependents;
    pr_popResponseQueue;
  }

  transition(S_W, WritebackAck, S) {
    dt_deallocateTBE;
    w_wakeUpRegionDependents;
    pr_popResponseQueue;
  }

  transition({NP_S, S_S}, CPUPrivateAck, S) {
    dt_deallocateTBE;
    w_wakeUpRegionDependents;
    pr_popResponseQueue;
  }

  transition(S, UpgradeRequest, S_P) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDir;
    b_sendPrivateNotice;
    so_setOwner;
    t_allocateTBE;
    p_popRequestQueue;
  }

  transition(S_P, CPUPrivateAck, P) {
    dt_deallocateTBE;
    w_wakeUpRegionDependents;
    pr_popResponseQueue;
  }

  transition(P, SendInv, P_AP_W) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirWithAck;
    so_setOwner;
    t_allocateTBE;
    rr_removeRequestorFromTBE;
    sns_setNumAcksSharers;
    cs_clearSharers;
    ss_setSharers;
    //i_RegionInvNotify;
    p_popRequestQueue;
  }

  transition({P_AP_W, S_AP_W}, DirReadyAck) {
    ti_triggerInv;
    pr_popResponseQueue;
  }

  transition(P_AS_W, DirReadyAck) {
    td_triggerDowngrade;
    pr_popResponseQueue;
  }

  transition(P_AS_W, TriggerDowngrade, P_AS) {
    rd_RegionDowngrade;
    pt_popTriggerQueue;
  }

  transition(P_AP_W, TriggerInv, P_AP) {
    i_RegionInvNotify;
    pt_popTriggerQueue;
  }

  transition(S_AP_W, TriggerInv, S_AP) {
    i_RegionInvNotify;
    pt_popTriggerQueue;
  }

  transition(P, SendUpgrade, P_AP_W) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirWithAck;
    so_setOwner;
    t_allocateTBE;
    rr_removeRequestorFromTBE;
    sns_setNumAcksSharers;
    cs_clearSharers;
    ss_setSharers;
    p_popRequestQueue;
  }

  transition(P, Evict, P_NP) {TagArrayRead, TagArrayWrite} {
    t_allocateTBE;
    sns_setNumAcksSharers;
    i0_RegionInvNotifyDemand0;
    d_deallocateRegionEntry;
  }

  transition(S, SendInv, P_AP_W) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirWithAck;
    so_setOwner;
    t_allocateTBE;
    rr_removeRequestorFromTBE;
    sns_setNumAcksSharers;
    cs_clearSharers;
    ss_setSharers;
    p_popRequestQueue;
  }

  transition(S, Evict, P_NP) {TagArrayRead, TagArrayWrite} {
    t_allocateTBE;
    sns_setNumAcksSharers;
    i0_RegionInvNotifyDemand0;
    d_deallocateRegionEntry;
  }

  transition(P_NP, LastAck, NP) {
    dt_deallocateTBE;
    wa_wakeUpAllDependents;
    pt_popTriggerQueue;
  }

  transition(S, SendUpgrade, S_AP_W) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirWithAck;
    so_setOwner;
    t_allocateTBE;
    rr_removeRequestorFromTBE;
    sns_setNumAcksSharers;
    cs_clearSharers;
    ss_setSharers;
    p_popRequestQueue;
  }

  transition(S_AP, LastAck, S_P) {
    sp_sendPrivateNoticeToOrigReq;
    pt_popTriggerQueue;
  }

  transition(P_AP, LastAck, P_P) {
    sp_sendPrivateNoticeToOrigReq;
    pt_popTriggerQueue;
  }

  transition(P, SendDowngrade, P_AS_W) {TagArrayRead, TagArrayWrite} {
    mru_setMRU;
    ur_updateDirtyStatusOnRequest;
    f_fwdReqToDirWithAckShared;
    so_setOwner;
    t_allocateTBE;
    sns_setNumAcksSharers;
    ss_setSharers; //why do we set the sharers before sending the downgrade? Are we sending a downgrade to the requestor?
    p_popRequestQueue;
  }

  transition(P_AS, LastAck, P_S) {
    c_sendSharedNoticeToOrigReq;
    pt_popTriggerQueue;
  }

  transition(P_S, CPUPrivateAck, S) {
    dt_deallocateTBE;
    w_wakeUpRegionDependents;
    pr_popResponseQueue;
  }

  transition({P_NP, P_AS, S_AP, P_AP}, InvAckCore) {} {
    ra_receiveAck;
    pr_popResponseQueue;
  }

  transition({P_NP, S_AP, P_AP}, InvAckCoreNoShare) {} {
    ra_receiveAck;
    pr_popResponseQueue;
  }

  transition(P_AS, InvAckCoreNoShare) {} {
    ra_receiveAck;
    rsr_removeSharerResponse;
    pr_popResponseQueue;
  }

}