gem5.git: src/mem/protocol/MOESI_AMD_Base-RegionBuffer.sm
1 /*
2 * Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * For use for simulation and test purposes only
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Authors: Jason Power
34 */
35
36 machine(MachineType:RegionBuffer, "Region Buffer for AMD_Base-like protocol")
37 : CacheMemory *cacheMemory; // stores only region addresses; its block size must be set to the region size implied by blocksPerRegion below
38 bool isOnCPU;
39 int blocksPerRegion := 64; // 4k regions
40 Cycles toDirLatency := 5; // Latency to fwd requests to directory
41 Cycles toRegionDirLatency := 5; // Latency for requests and acks to the region directory
42 Cycles nextEvictLatency := 1; // latency added between each block while evicting region
43 bool noTCCdir := "False";
44 int TCC_select_num_bits := 1;
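// Worked example of the default geometry (a sketch assuming the usual
// 64 B Ruby block size): blocksPerRegion = 64 gives 64 x 64 B = 4 KB
// regions, so blockBits = 6, regionBits = 6, and a region base is the
// request address with its low 12 bits cleared.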
45
46 // From the Cores
47 MessageBuffer * requestFromCore, network="From", virtual_network="0", vnet_type="request";
48 MessageBuffer * responseFromCore, network="From", virtual_network="2", vnet_type="response";
49
50 // Requests to the cores or directory
51 MessageBuffer * requestToNetwork, network="To", virtual_network="0", vnet_type="request";
52
53 // From Region-Dir
54 MessageBuffer * notifyFromRegionDir, network="From", virtual_network="7", vnet_type="request";
55 MessageBuffer * probeFromRegionDir, network="From", virtual_network="8", vnet_type="request";
56
57 // From the directory
58 MessageBuffer * unblockFromDir, network="From", virtual_network="4", vnet_type="unblock";
59
60 // To the region-Dir
61 MessageBuffer * responseToRegDir, network="To", virtual_network="2", vnet_type="response";
62
63 MessageBuffer * triggerQueue;
64 {
65
66 // States
67 state_declaration(State, desc="Region states", default="RegionBuffer_State_NP") {
68 NP, AccessPermission:Invalid, desc="Not present in region directory";
69 P, AccessPermission:Invalid, desc="Region is private to the cache";
70 S, AccessPermission:Invalid, desc="Region is possibly shared with others";
71
72 NP_PS, AccessPermission:Invalid, desc="Intermediate state waiting for notify from r-dir";
73 S_P, AccessPermission:Invalid, desc="Intermediate state while upgrading region";
74
75 P_NP, AccessPermission:Invalid, desc="Intermediate state while evicting all lines in region";
76 P_S, AccessPermission:Invalid, desc="Intermediate state while downgrading all lines in region";
77
78 S_NP_PS, AccessPermission:Invalid, desc="Got an inv in S_P, waiting for all inv acks, then going to NP_PS since the write is already out there";
79 P_NP_NP, AccessPermission:Invalid, desc="Evicting region on repl, then got an inv. Need to re-evict";
80
81 P_NP_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
82 P_S_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
83 S_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
84 S_NP_PS_O, AccessPermission:Invalid, desc="Waiting for all outstanding requests";
85
86 SS_P, AccessPermission:Invalid, desc="Waiting for CPU write that we know is there";
87
88 P_NP_W, AccessPermission:Invalid, desc="Waiting for writeback ack";
89
90 NP_W, AccessPermission:Invalid, desc="Got a done ack before request, waiting for that victim";
91 }
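// Naming convention for the states above: single names are stable
// (NP = not present, P = private, S = shared); transients read
// <current>_<target>, e.g. P_NP is evicting from private to not present.
// A trailing _O means waiting for outstanding requests to drain, and a
// trailing _W means waiting for a writeback ack.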
92
93 enumeration(Event, desc="Region directory events") {
94 CPURead, desc="Access from CPU core";
95 CPUWrite, desc="Access from CPU core";
96 CPUWriteback, desc="Writeback request from CPU core";
97
98 ReplRegion, desc="Start a replace on a region";
99
100 PrivateNotify, desc="Update entry to private state";
101 SharedNotify, desc="Update entry to shared state";
102 WbNotify, desc="Writeback notification received";
103 InvRegion, desc="Start invalidating a region";
104 DowngradeRegion, desc="Start downgrading a region";
105
106 InvAck, desc="Ack from core";
107
108 DoneAck, desc="Ack from core that request has finished";
109 AllOutstanding, desc="All outstanding requests have now finished";
110
111 Evict, desc="Loopback to evict each block";
112 LastAck_PrbResp, desc="Done evicting all the blocks, got the last ack from core, now respond to region dir";
113 LastAck_CleanWb, desc="Done evicting all the blocks, got the last ack from core, now start clean writeback (note the dir has already been updated)";
114
115 StallAccess, desc="Wait for the done ack on the address before proceeding";
116 StallDoneAck, desc="Wait for the access on the address before proceeding";
117
118 StaleRequest, desc="Got a stale victim from the cache, fwd it without incrementing outstanding";
119 }
120
121 enumeration(RequestType, desc="To communicate stats from transitions to recordStats") {
122 TagArrayRead, desc="Read the tag array";
123 TagArrayWrite, desc="Write the tag array";
124 }
125
126 structure(BoolVec, external="yes") {
127 bool at(int);
128 void resize(int);
129 void clear();
130 int size();
131 }
132
133 structure(Entry, desc="Region entry", interface="AbstractCacheEntry") {
134 Addr addr, desc="Base address of this region";
135 State RegionState, desc="Region state";
136 DataBlock DataBlk, desc="Data for the block (always empty in region buffer)";
137 BoolVec ValidBlocks, desc="A vector to keep track of valid blocks";
138 int NumValidBlocks, desc="Number of trues in ValidBlocks to avoid iterating";
139 BoolVec UsedBlocks, desc="A vector to keep track of blocks ever valid";
140 bool dirty, desc="Dirty as best known by the region buffer";
141 // This is needed so we don't ack an invalidate until all requests are ordered
142 int NumOutstandingReqs, desc="Total outstanding private/shared requests";
143 BoolVec OutstandingReqs, desc="Blocks that have outstanding private/shared requests";
144 bool MustDowngrade, desc="Set when we got a downgrade before the shd or pvt permissions";
145 Cycles ProbeRequestTime, default="Cycles(0)", desc="Time region dir started the probe";
146 Cycles InitialRequestTime, default="Cycles(0)", desc="Time message was sent to region dir";
147 bool MsgSentToDir, desc="True if the current request required a message to the dir";
148 bool clearOnDone, default="false", desc="clear valid bit when request completes";
149 Addr clearOnDoneAddr, desc="address of the block whose valid bit to clear when the request completes";
150 }
151
152 structure(TBE, desc="...") {
153 State TBEState, desc="Transient state";
154 //int NumValidBlocks, desc="Number of blocks valid so we don't have to count a BoolVec";
155 BoolVec ValidBlocks, desc="A vector to keep track of valid blocks";
156 bool AllAcksReceived, desc="Got all necessary acks from dir";
157 bool DoneEvicting, desc="Done iterating through blocks checking for valids";
158 BoolVec AcksReceived, desc="Received acks for these blocks";
159 bool SendAck, desc="If true, send an ack to the r-dir at end of inv";
160 ProbeRequestType MsgType, desc="Type of message to send while 'evicting' ";
161 int NumOutstandingReqs, desc="Total outstanding private/shared requests";
162 BoolVec OutstandingReqs, desc="Blocks that have outstanding private/shared requests";
163 MachineID Requestor, desc="Requestor for three hop transactions";
164 bool DemandRequest, default="false", desc="Associated with a demand request";
165 Addr DemandAddress, desc="Address for the demand request";
166 bool DoneAckReceived, default="false", desc="True if the done ack arrived before the message";
167 Addr DoneAckAddr, desc="Address of the done ack received early";
168 int OutstandingThreshold, desc="Number of outstanding requests to trigger AllOutstanding on";
169
170 ProbeRequestType NewMsgType, desc="Type of message to send while 'evicting' ";
171 MachineID NewRequestor, desc="Requestor for three hop transactions";
172 bool NewDemandRequest, default="false", desc="Associated with a demand request";
173 Addr NewDemandAddress, desc="Address for the demand request";
174 bool dirty, desc="dirty";
175 bool AllOutstandingTriggered, default="false", desc="bit for only one all outstanding";
176 int OutstandingAcks, default="0", desc="number of acks to wait for";
177 }
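// The New* fields above hold a second probe that arrives while a
// replacement eviction is still in flight; sne_setNewTBE buffers it and
// soe_setOldTBE replays it on the P_NP -> P_NP_NP path below.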
178
179 structure(TBETable, external="yes") {
180 TBE lookup(Addr);
181 void allocate(Addr);
182 void deallocate(Addr);
183 bool isPresent(Addr);
184 }
185
186 // Stores only region addresses
187 TBETable TBEs, template="<RegionBuffer_TBE>", constructor="m_number_of_TBEs";
188 int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()";
189
190 Tick clockEdge();
191 Tick cyclesToTicks(Cycles c);
192
193 void set_cache_entry(AbstractCacheEntry b);
194 void unset_cache_entry();
195 void set_tbe(TBE b);
196 void unset_tbe();
197 void wakeUpAllBuffers();
198 void wakeUpBuffers(Addr a);
199 Cycles curCycle();
200 MachineID mapAddressToMachine(Addr addr, MachineType mtype);
201
202 int blockBits, default="RubySystem::getBlockSizeBits()";
203 int blockBytes, default="RubySystem::getBlockSizeBytes()";
204 int regionBits, default="log2(m_blocksPerRegion)";
205
206 // Functions
207
208 int getRegionOffset(Addr addr) {
209 if (blocksPerRegion > 1) {
210 Addr offset := bitSelect(addr, blockBits, regionBits+blockBits-1);
211 int ret := addressToInt(offset);
212 assert(ret < blocksPerRegion);
213 return ret;
214 } else {
215 return 0;
216 }
217 }
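// Worked example (assuming blockBits = 6 and regionBits = 6):
// getRegionOffset(0x1A40) extracts bits [6,11], i.e.
// (0x1A40 >> 6) & 0x3F = 41, so 0x1A40 is block 41 of its region.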
218
219 Addr getRegionBase(Addr addr) {
220 return maskLowOrderBits(addr, blockBits+regionBits);
221 }
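// Under the same assumed geometry, getRegionBase(0x1A40) masks the low
// blockBits + regionBits = 12 bits, yielding region base 0x1000.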
222
223 Addr getNextBlock(Addr addr) {
224 Addr a := addr;
225 return makeNextStrideAddress(a, 1);
226 }
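// makeNextStrideAddress with stride 1 advances by one cache block, so
// with the assumed 64 B blocks getNextBlock(0x1A40) returns 0x1A80.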
227
228 MachineID getPeer(MachineID mach, Addr address) {
229 if (isOnCPU) {
230 return createMachineID(MachineType:CorePair, intToID(0));
231 } else if (noTCCdir) {
232 return mapAddressToRange(address,MachineType:TCC,
233 TCC_select_low_bit, TCC_select_num_bits);
234 } else {
235 return createMachineID(MachineType:TCCdir, intToID(0));
236 }
237 }
238
239 bool isOutstanding(TBE tbe, Entry cache_entry, Addr addr) {
240 if (is_valid(tbe) && tbe.OutstandingReqs.size() > 0) {
241 DPRINTF(RubySlicc, " outstanding tbe reqs %s %s %d %d\n",
242 tbe.OutstandingReqs, addr, getRegionOffset(addr),
243 tbe.OutstandingReqs.at(getRegionOffset(addr)));
244 return tbe.OutstandingReqs.at(getRegionOffset(addr));
245 } else if (is_valid(cache_entry)) {
246 DPRINTF(RubySlicc, " outstanding cache reqs %s %s %d %d\n",
247 cache_entry.OutstandingReqs, addr, getRegionOffset(addr),
248 cache_entry.OutstandingReqs.at(getRegionOffset(addr)));
249 return cache_entry.OutstandingReqs.at(getRegionOffset(addr));
250 } else {
251 return false;
252 }
253 }
254
255 bool isOnGPU() {
256 if (isOnCPU) {
257 return false;
258 }
259 return true;
260 }
261
262 bool isRead(CoherenceRequestType type) {
263 return (type == CoherenceRequestType:RdBlk || type == CoherenceRequestType:RdBlkS ||
264 type == CoherenceRequestType:VicClean);
265 }
266
267 bool presentOrAvail(Addr addr) {
268 return cacheMemory.isTagPresent(getRegionBase(addr)) || cacheMemory.cacheAvail(getRegionBase(addr));
269 }
270
271 // Returns a region entry!
272 Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
273 return static_cast(Entry, "pointer", cacheMemory.lookup(getRegionBase(addr)));
274 }
275
276 TBE getTBE(Addr addr), return_by_pointer="yes" {
277 return TBEs.lookup(getRegionBase(addr));
278 }
279
280 DataBlock getDataBlock(Addr addr), return_by_ref="yes" {
281 return getCacheEntry(getRegionBase(addr)).DataBlk;
282 }
283
284 State getState(TBE tbe, Entry cache_entry, Addr addr) {
285 if (is_valid(tbe)) {
286 return tbe.TBEState;
287 } else if (is_valid(cache_entry)) {
288 return cache_entry.RegionState;
289 }
290 return State:NP;
291 }
292
293 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
294 if (is_valid(tbe)) {
295 tbe.TBEState := state;
296 }
297 if (is_valid(cache_entry)) {
298 cache_entry.RegionState := state;
299 }
300 }
301
302 AccessPermission getAccessPermission(Addr addr) {
303 TBE tbe := getTBE(addr);
304 if(is_valid(tbe)) {
305 return RegionBuffer_State_to_permission(tbe.TBEState);
306 }
307 Entry cache_entry := getCacheEntry(addr);
308 if(is_valid(cache_entry)) {
309 return RegionBuffer_State_to_permission(cache_entry.RegionState);
310 }
311 return AccessPermission:NotPresent;
312 }
313
314 void functionalRead(Addr addr, Packet *pkt) {
315 functionalMemoryRead(pkt);
316 }
317
318 int functionalWrite(Addr addr, Packet *pkt) {
319 if (functionalMemoryWrite(pkt)) {
320 return 1;
321 } else {
322 return 0;
323 }
324 }
325
326 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
327 if (is_valid(cache_entry)) {
328 cache_entry.changePermission(RegionBuffer_State_to_permission(state));
329 }
330 }
331
332 void recordRequestType(RequestType stat, Addr addr) {
333 if (stat == RequestType:TagArrayRead) {
334 cacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr);
335 } else if (stat == RequestType:TagArrayWrite) {
336 cacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr);
337 }
338 }
339
340 bool checkResourceAvailable(RequestType request_type, Addr addr) {
341 if (request_type == RequestType:TagArrayRead) {
342 return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
343 } else if (request_type == RequestType:TagArrayWrite) {
344 return cacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr);
345 } else {
346 error("Invalid RequestType type in checkResourceAvailable");
347 return true;
348 }
349 }
350
351 out_port(triggerQueue_out, TriggerMsg, triggerQueue);
352
353 // Overloaded outgoing request network for both probes to cores and requests
354 // to the directory.
355 // Fix Me: These forwarded requests need to be on a separate virtual channel
356 // to avoid deadlock!
357 out_port(requestNetwork_out, CPURequestMsg, requestToNetwork);
358 out_port(probeNetwork_out, NBProbeRequestMsg, requestToNetwork);
359
360 out_port(responseNetwork_out, ResponseMsg, responseToRegDir);
361
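// The rank annotations on the in_ports below set their service priority:
// internal triggers (rank=4) are handled before unblocks, probes, and
// notifies, and brand-new core requests (rank=0) are handled last.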
362 in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=4) {
363 if (triggerQueue_in.isReady(clockEdge())) {
364 peek(triggerQueue_in, TriggerMsg) {
365 Entry cache_entry := getCacheEntry(in_msg.addr);
366 TBE tbe := getTBE(in_msg.addr);
367 DPRINTF(RubySlicc, "trigger msg: %s (%s)\n", in_msg, getRegionBase(in_msg.addr));
368 assert(is_valid(tbe));
369 if (in_msg.Type == TriggerType:AcksComplete) {
370 if (tbe.SendAck) {
371 trigger(Event:LastAck_PrbResp, in_msg.addr, cache_entry, tbe);
372 } else {
373 trigger(Event:LastAck_CleanWb, in_msg.addr, cache_entry, tbe);
374 }
375 } else if (in_msg.Type == TriggerType:AllOutstanding) {
376 trigger(Event:AllOutstanding, in_msg.addr, cache_entry, tbe);
377 } else {
378 assert(in_msg.Type == TriggerType:InvNext);
379 trigger(Event:Evict, in_msg.addr, cache_entry, tbe);
380 }
381 }
382 }
383 }
384
385 in_port(unblockNetwork_in, UnblockMsg, unblockFromDir, rank=3) {
386 if (unblockNetwork_in.isReady(clockEdge())) {
387 peek(unblockNetwork_in, UnblockMsg) {
388 TBE tbe := getTBE(in_msg.addr);
389 Entry cache_entry := getCacheEntry(in_msg.addr);
390 if (in_msg.DoneAck) {
391 if (isOutstanding(tbe, cache_entry, in_msg.addr)) {
392 trigger(Event:DoneAck, in_msg.addr, cache_entry, tbe);
393 } else {
394 trigger(Event:StallDoneAck, in_msg.addr, cache_entry, tbe);
395 }
396 } else {
397 assert(is_valid(tbe));
398 trigger(Event:InvAck, in_msg.addr, cache_entry, tbe);
399 }
400 }
401 }
402 }
403
404 in_port(probeNetwork_in, NBProbeRequestMsg, probeFromRegionDir, rank=2) {
405 if (probeNetwork_in.isReady(clockEdge())) {
406 peek(probeNetwork_in, NBProbeRequestMsg) {
407 TBE tbe := getTBE(in_msg.addr);
408 Entry cache_entry := getCacheEntry(in_msg.addr);
409 assert(getRegionBase(in_msg.addr) == in_msg.addr);
410 if (in_msg.Type == ProbeRequestType:PrbInv) {
411 trigger(Event:InvRegion, in_msg.addr, cache_entry, tbe);
412 } else if (in_msg.Type == ProbeRequestType:PrbDowngrade) {
413 trigger(Event:DowngradeRegion, in_msg.addr, cache_entry, tbe);
414 } else {
415 error("Unknown probe message\n");
416 }
417 }
418 }
419 }
420
421 in_port(notifyNetwork_in, CPURequestMsg, notifyFromRegionDir, rank=1) {
422 if (notifyNetwork_in.isReady(clockEdge())) {
423 peek(notifyNetwork_in, CPURequestMsg) {
424 TBE tbe := getTBE(in_msg.addr);
425 Entry cache_entry := getCacheEntry(in_msg.addr);
426 //Fix Me...add back in: assert(is_valid(cache_entry));
427 if (in_msg.Type == CoherenceRequestType:WbNotify) {
428 trigger(Event:WbNotify, in_msg.addr, cache_entry, tbe);
429 } else if (in_msg.Type == CoherenceRequestType:SharedNotify) {
430 trigger(Event:SharedNotify, in_msg.addr, cache_entry, tbe);
431 } else if (in_msg.Type == CoherenceRequestType:PrivateNotify) {
432 trigger(Event:PrivateNotify, in_msg.addr, cache_entry, tbe);
433 } else {
434 error("Unknown notify message\n");
435 }
436 }
437 }
438 }
439
440 // In from cores
441 // NOTE: We get the cache / TBE entry based on the region address,
442 // but pass the block address to the actions
443 in_port(requestNetwork_in, CPURequestMsg, requestFromCore, rank=0) {
444 if (requestNetwork_in.isReady(clockEdge())) {
445 peek(requestNetwork_in, CPURequestMsg) {
446 TBE tbe := getTBE(in_msg.addr);
447 Entry cache_entry := getCacheEntry(in_msg.addr);
448 if (is_valid(tbe) && tbe.DoneAckReceived && tbe.DoneAckAddr == in_msg.addr) {
449 DPRINTF(RubySlicc, "Stale/Stall request %s\n", in_msg.Type);
450 if (in_msg.Type == CoherenceRequestType:VicDirty || in_msg.Type == CoherenceRequestType:VicClean )
451 {
452 trigger(Event:StaleRequest, in_msg.addr, cache_entry, tbe);
453 } else {
454 trigger(Event:StallAccess, in_msg.addr, cache_entry, tbe);
455 }
456 } else if (isOutstanding(tbe, cache_entry, in_msg.addr)) {
457 DPRINTF(RubySlicc, "Stall outstanding request %s\n", in_msg.Type);
458 trigger(Event:StallAccess, in_msg.addr, cache_entry, tbe);
459 } else {
460 if (presentOrAvail(in_msg.addr)) {
461 if (in_msg.Type == CoherenceRequestType:RdBlkM ) {
462 trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
463 } else if (in_msg.Type == CoherenceRequestType:WriteThrough ) {
464 trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
465 } else if (in_msg.Type == CoherenceRequestType:Atomic ) {
466 trigger(Event:CPUWrite, in_msg.addr, cache_entry, tbe);
467 } else {
468 if (in_msg.Type == CoherenceRequestType:VicDirty ||
469 in_msg.Type == CoherenceRequestType:VicClean) {
470 trigger(Event:CPUWriteback, in_msg.addr, cache_entry, tbe);
471 } else {
472 trigger(Event:CPURead, in_msg.addr, cache_entry, tbe);
473 }
474 }
475 } else {
476 Addr victim := cacheMemory.cacheProbe(getRegionBase(in_msg.addr));
477 TBE victim_tbe := getTBE(victim);
478 Entry victim_entry := getCacheEntry(victim);
479 DPRINTF(RubySlicc, "Replacing region %s for %s(%s)\n", victim, in_msg.addr, getRegionBase(in_msg.addr));
480 trigger(Event:ReplRegion, victim, victim_entry, victim_tbe);
481 }
482 }
483 }
484 }
485 }
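// To summarize the classification above: RdBlkM, WriteThrough, and Atomic
// map to CPUWrite; VicDirty and VicClean map to CPUWriteback; everything
// else is a CPURead. If no region entry is present or available, the
// chosen victim region is evicted first via ReplRegion.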
486
487 // Actions
488 action(f_fwdReqToDir, "f", desc="Forward CPU request to directory") {
489 peek(requestNetwork_in, CPURequestMsg) {
490 enqueue(requestNetwork_out, CPURequestMsg, toDirLatency) {
491 out_msg.addr := in_msg.addr;
492 out_msg.Type := in_msg.Type;
493 out_msg.DataBlk := in_msg.DataBlk;
494 out_msg.Dirty := in_msg.Dirty;
495 out_msg.Requestor := in_msg.Requestor;
496 out_msg.WTRequestor := in_msg.WTRequestor;
497 out_msg.Destination.add(mapAddressToMachine(in_msg.addr, MachineType:Directory));
498 out_msg.Shared := in_msg.Shared;
499 out_msg.MessageSize := in_msg.MessageSize;
500 out_msg.Private := true;
501 out_msg.InitialRequestTime := curCycle();
502 out_msg.ProbeRequestStartTime := curCycle();
503 if (getState(tbe, cache_entry, address) == State:S) {
504 out_msg.ForceShared := true;
505 }
506 DPRINTF(RubySlicc, "Fwd: %s\n", out_msg);
507 //assert(getState(tbe, cache_entry, address) == State:P || getState(tbe, cache_entry, address) == State:S);
508 if (getState(tbe, cache_entry, address) == State:NP_W) {
509 APPEND_TRANSITION_COMMENT(" fwding stale request: ");
510 APPEND_TRANSITION_COMMENT(out_msg.Type);
511 }
512 }
513 }
514 }
515
516 action(u_updateRegionEntry, "u", desc="Update the entry for profiling") {
517 peek(requestNetwork_in, CPURequestMsg) {
518 if (is_valid(cache_entry)) {
519 if (in_msg.CtoDSinked == false) {
520 APPEND_TRANSITION_COMMENT(" incr outstanding ");
521 cache_entry.NumOutstandingReqs := 1 + cache_entry.NumOutstandingReqs;
522 assert(cache_entry.OutstandingReqs.at(getRegionOffset(address)) == false);
523 cache_entry.OutstandingReqs.at(getRegionOffset(address)) := true;
524 assert(cache_entry.NumOutstandingReqs == countBoolVec(cache_entry.OutstandingReqs));
525 } else {
526 APPEND_TRANSITION_COMMENT(" NOT incr outstanding ");
527 assert(in_msg.Type == CoherenceRequestType:RdBlkM || in_msg.Type == CoherenceRequestType:RdBlkS);
528 }
529 APPEND_TRANSITION_COMMENT(cache_entry.NumOutstandingReqs);
530 if (in_msg.Type == CoherenceRequestType:RdBlkM || in_msg.Type == CoherenceRequestType:Atomic ||
531 in_msg.Type == CoherenceRequestType:WriteThrough )
532 {
533 cache_entry.dirty := true;
534 }
535 if (in_msg.Type == CoherenceRequestType:VicDirty ||
536 in_msg.Type == CoherenceRequestType:VicClean) {
537 DPRINTF(RubySlicc, "Got %s for addr %s\n", in_msg.Type, address);
538 //assert(cache_entry.ValidBlocks.at(getRegionOffset(address)));
539 // can in fact be inv if core got an inv after a vicclean before it got here
540 if (cache_entry.ValidBlocks.at(getRegionOffset(address))) {
541 cache_entry.clearOnDone := true;
542 cache_entry.clearOnDoneAddr := address;
543 //cache_entry.ValidBlocks.at(getRegionOffset(address)) := false;
544 //cache_entry.NumValidBlocks := cache_entry.NumValidBlocks - 1;
545 }
546 } else {
547 if (cache_entry.ValidBlocks.at(getRegionOffset(address)) == false) {
548 cache_entry.NumValidBlocks := cache_entry.NumValidBlocks + 1;
549 }
550 DPRINTF(RubySlicc, "before valid %s addr %s bits %s\n",
551 in_msg.Type, address, cache_entry.ValidBlocks);
552 cache_entry.ValidBlocks.at(getRegionOffset(address)) := true;
553 DPRINTF(RubySlicc, "after valid %s addr %s bits %s\n",
554 in_msg.Type, address, cache_entry.ValidBlocks);
555 cache_entry.UsedBlocks.at(getRegionOffset(address)) := true;
556 }
557 assert(cache_entry.NumValidBlocks <= blocksPerRegion);
558 assert(cache_entry.NumValidBlocks >= 0);
559 APPEND_TRANSITION_COMMENT(" valid blocks ");
560 APPEND_TRANSITION_COMMENT(cache_entry.ValidBlocks);
561 } else {
562 error("This shouldn't happen anymore I think");
563 //tbe.ValidBlocks.at(getRegionOffset(address)) := true;
564 assert(getState(tbe, cache_entry, address) == State:P_NP);
565 }
566 }
567 }
568
569 action(uw_updatePossibleWriteback, "uw", desc="writeback request complete") {
570 peek(unblockNetwork_in, UnblockMsg) {
571 if (is_valid(cache_entry) && in_msg.validToInvalid &&
572 cache_entry.clearOnDone && cache_entry.clearOnDoneAddr == address) {
573 DPRINTF(RubySlicc, "Clearing valid bit for completed writeback\n");
574 cache_entry.ValidBlocks.at(getRegionOffset(address)) := false;
575 cache_entry.NumValidBlocks := cache_entry.NumValidBlocks - 1;
576 cache_entry.clearOnDone := false;
577 }
578 }
579 }
580
581
582 action(rp_requestPrivate, "rp", desc="Send private request r-dir") {
583 peek(requestNetwork_in, CPURequestMsg) {
584 // No need to send acks on replacements
585 assert(is_invalid(tbe));
586 enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
587 out_msg.addr := address; // use the actual address so the demand request can be fulfilled
588 out_msg.DemandAddress := address;
589 out_msg.Type := CoherenceRequestType:PrivateRequest;
590 out_msg.OriginalType := in_msg.Type;
591 out_msg.Requestor := machineID;
592 out_msg.WTRequestor := in_msg.WTRequestor;
593 out_msg.InitialRequestTime := curCycle();
594 // will this always be ok? probably not for multisocket
595 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
596 out_msg.MessageSize := MessageSizeType:Request_Control;
597 DPRINTF(RubySlicc, "Private request %s\n", out_msg);
598 }
599 cache_entry.ProbeRequestTime := curCycle();
600 cache_entry.MsgSentToDir := true;
601 APPEND_TRANSITION_COMMENT(getRegionBase(address));
602 }
603 }
604
605 action(ru_requestUpgrade, "ru", desc="Send upgrade request r-dir") {
606 peek(requestNetwork_in, CPURequestMsg) {
607 // No need to send acks on replacements
608 assert(is_invalid(tbe));
609 enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
610 out_msg.addr := address; // use the actual address so the demand request can be fulfilled
611 out_msg.Type := CoherenceRequestType:UpgradeRequest;
612 out_msg.OriginalType := in_msg.Type;
613 out_msg.Requestor := machineID;
614 out_msg.WTRequestor := in_msg.WTRequestor;
615 out_msg.InitialRequestTime := curCycle();
616 // will this always be ok? probably not for multisocket
617 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
618 out_msg.MessageSize := MessageSizeType:Request_Control;
619 }
620 cache_entry.ProbeRequestTime := curCycle();
621 cache_entry.MsgSentToDir := true;
622 APPEND_TRANSITION_COMMENT(getRegionBase(address));
623 }
624 }
625
626 action(rw_requestWriteback, "rq", desc="Send writeback request") {
627 // No need to send acks on replacements
628 enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
629 out_msg.addr := getRegionBase(address); // the writeback covers the whole region, so use the region base address
630 out_msg.Type := CoherenceRequestType:CleanWbRequest;
631 out_msg.Requestor := machineID;
632 // will this always be ok? probably not for multisocket
633 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
634 out_msg.MessageSize := MessageSizeType:Request_Control;
635 out_msg.Dirty := tbe.dirty;
636 APPEND_TRANSITION_COMMENT(getRegionBase(address));
637 }
638 }
639
640 action(rs_requestShared, "rs", desc="Send shared request r-dir") {
641 peek(requestNetwork_in, CPURequestMsg) {
642 // No need to send acks on replacements
643 assert(is_invalid(tbe));
644 enqueue(requestNetwork_out, CPURequestMsg, toRegionDirLatency) {
645 out_msg.addr := address; // use the actual address so the demand request can be fulfilled
646 out_msg.Type := CoherenceRequestType:SharedRequest;
647 out_msg.OriginalType := in_msg.Type;
648 out_msg.Requestor := machineID;
649 out_msg.WTRequestor := in_msg.WTRequestor;
650 out_msg.InitialRequestTime := curCycle();
651 // will this always be ok? probably not for multisocket
652 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
653 out_msg.MessageSize := MessageSizeType:Request_Control;
654 }
655 cache_entry.ProbeRequestTime := curCycle();
656 cache_entry.MsgSentToDir := true;
657 APPEND_TRANSITION_COMMENT(getRegionBase(address));
658 }
659 }
660
661 action(ai_ackRegionInv, "ai", desc="Send ack to r-dir on region inv if tbe says so") {
662 // No need to send acks on replacements
663 assert(is_valid(tbe));
664 enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
665 out_msg.addr := getRegionBase(address);
666 out_msg.Type := CoherenceResponseType:CPUPrbResp;
667 out_msg.Sender := machineID;
668 // will this always be ok? probably not for multisocket
669 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
670 out_msg.MessageSize := MessageSizeType:Response_Control;
671 }
672 }
673
674 action(ad_ackDirectory, "ad", desc="send probe response to directory") {
675 if (noTCCdir && tbe.MsgType == ProbeRequestType:PrbDowngrade && isOnGPU()) { // VIPER TCC doesn't understand PrbShrData
676 assert(tbe.DemandRequest); // so let the RegionBuffer take care of sending back the ack
677 enqueue(responseNetwork_out, ResponseMsg, toDirLatency) {
678 out_msg.addr := tbe.DemandAddress;
679 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
680 out_msg.Sender := getPeer(machineID,address);
681 // will this always be ok? probably not for multisocket
682 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
683 out_msg.Dirty := false; // only true if sending back data, I think
684 out_msg.Hit := false;
685 out_msg.Ntsl := false;
686 out_msg.State := CoherenceState:NA;
687 out_msg.NoAckNeeded := true;
688 out_msg.MessageSize := MessageSizeType:Response_Control;
689 DPRINTF(RubySlicc, "%s\n", out_msg);
690 }
691 }
692 }
693
694 action(aie_ackRegionExclusiveInv, "aie", desc="Send ack to r-dir on region inv if tbe says so") {
695 // No need to send acks on replacements
696 assert(is_valid(tbe));
697 enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
698 out_msg.addr := getRegionBase(address);
699 out_msg.Type := CoherenceResponseType:CPUPrbResp;
700 out_msg.Sender := machineID;
701 out_msg.NotCached := true;
702 // will this always be ok? probably not for multisocket
703 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
704 out_msg.MessageSize := MessageSizeType:Response_Control;
705 out_msg.Dirty := tbe.dirty;
706 }
707 }
708
709 action(ain_ackRegionInvNow, "ain", desc="Send ack to r-dir on region inv") {
710 enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
711 out_msg.addr := getRegionBase(address);
712 out_msg.Type := CoherenceResponseType:CPUPrbResp;
713 out_msg.Sender := machineID;
714 // will this always be ok? probably not for multisocket
715 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
716 out_msg.MessageSize := MessageSizeType:Response_Control;
717 }
718 }
719
720 action(aine_ackRegionInvExclusiveNow, "aine", desc="Send ack to r-dir on region inv with exclusive permission") {
721 enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
722 out_msg.addr := getRegionBase(address);
723 out_msg.Type := CoherenceResponseType:CPUPrbResp;
724 out_msg.Sender := machineID;
725 out_msg.NotCached := true;
726 // will this always be ok? probably not for multisocket
727 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
728 out_msg.MessageSize := MessageSizeType:Response_Control;
729 }
730 }
731
732 action(ap_ackPrivateNotify, "ap", desc="Send ack to r-dir on private notify") {
733 enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
734 out_msg.addr := getRegionBase(address);
735 out_msg.Type := CoherenceResponseType:PrivateAck;
736 out_msg.Sender := machineID;
737 // will this always be ok? probably not for multisocket
738 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
739 out_msg.MessageSize := MessageSizeType:Response_Control;
740 }
741 }
742
743 action(aw_ackWbNotify, "aw", desc="Send ack to r-dir on writeback notify") {
744 peek(notifyNetwork_in, CPURequestMsg) {
745 if (in_msg.NoAckNeeded == false) {
746 enqueue(responseNetwork_out, ResponseMsg, toRegionDirLatency) {
747 out_msg.addr := getRegionBase(address);
748 out_msg.Type := CoherenceResponseType:RegionWbAck;
749 out_msg.Sender := machineID;
750 // will this always be ok? probably not for multisocket
751 out_msg.Destination.add(mapAddressToMachine(address, MachineType:RegionDir));
752 out_msg.MessageSize := MessageSizeType:Response_Control;
753 }
754 }
755 }
756 }
757
758 action(e_evictCurrent, "e", desc="Evict this block in the region") {
759 // send force invalidate message to directory to invalidate this block
760 // must invalidate all blocks since the region buffer could have privatized it
761 if (tbe.ValidBlocks.at(getRegionOffset(address)) &&
762 (tbe.DemandRequest == false || tbe.DemandAddress != address)) {
763 DPRINTF(RubySlicc, "trying to evict address %s (base: %s, offset: %d)\n", address, getRegionBase(address), getRegionOffset(address));
764 DPRINTF(RubySlicc, "tbe valid blocks %s\n", tbe.ValidBlocks);
765
766 enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
767 out_msg.addr := address;
768 out_msg.Type := tbe.MsgType;
769 out_msg.ReturnData := true;
770 if (address == tbe.DemandAddress) {
771 out_msg.DemandRequest := true;
772 }
773 out_msg.MessageSize := MessageSizeType:Control;
774 out_msg.Destination.add(getPeer(machineID,address));
775 DPRINTF(RubySlicc, "%s\n", out_msg);
776 }
777 APPEND_TRANSITION_COMMENT(" current ");
778 APPEND_TRANSITION_COMMENT(tbe.ValidBlocks.at(getRegionOffset(address)));
779 tbe.AllAcksReceived := false;
780 } else {
781 DPRINTF(RubySlicc, "Not evicting demand %s\n", address);
782 }
783 }
784
785 action(ed_evictDemand, "ed", desc="Evict the demand request if it's valid") {
786 if (noTCCdir && tbe.MsgType == ProbeRequestType:PrbDowngrade && isOnGPU()) {
787 tbe.OutstandingAcks := 0;
788 tbe.AllAcksReceived := true;
789 tbe.DoneEvicting := true;
790 enqueue(triggerQueue_out, TriggerMsg, 1) {
791 out_msg.Type := TriggerType:AcksComplete;
792 out_msg.addr := getRegionBase(address);
793 }
794 } else if (tbe.DemandRequest) {
795 enqueue(probeNetwork_out, NBProbeRequestMsg, 1) {
796 out_msg.addr := tbe.DemandAddress;
797 out_msg.Type := tbe.MsgType;
798 out_msg.ReturnData := true;
799 out_msg.DemandRequest := true;
800 out_msg.MessageSize := MessageSizeType:Control;
801 out_msg.Destination.add(getPeer(machineID,address));
802 DPRINTF(RubySlicc, "%s\n", out_msg);
803 tbe.AllAcksReceived := false;
804 }
805 if (tbe.ValidBlocks.at(getRegionOffset(tbe.DemandAddress)) == false) {
806 tbe.OutstandingAcks := tbe.OutstandingAcks + 1;
807 }
808 APPEND_TRANSITION_COMMENT("Evicting demand ");
809 APPEND_TRANSITION_COMMENT(tbe.DemandAddress);
810 }
811 APPEND_TRANSITION_COMMENT("waiting acks ");
812 APPEND_TRANSITION_COMMENT(tbe.OutstandingAcks);
813 }
814
815 action(adp_AckDemandProbe, "fp", desc="forward demand probe even if we know that the core is invalid") {
816 peek(probeNetwork_in, NBProbeRequestMsg) {
817 if (in_msg.DemandRequest) {
818 enqueue(responseNetwork_out, ResponseMsg, toDirLatency) {
819 out_msg.addr := in_msg.DemandAddress;
820 out_msg.Type := CoherenceResponseType:CPUPrbResp; // L3 and CPUs respond in same way to probes
821 out_msg.Sender := getPeer(machineID,address);
822 // will this always be ok? probably not for multisocket
823 out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
824 out_msg.Dirty := false; // only true if sending back data, I think
825 out_msg.Hit := false;
826 out_msg.Ntsl := false;
827 out_msg.State := CoherenceState:NA;
828 out_msg.NoAckNeeded := true;
829 out_msg.MessageSize := MessageSizeType:Response_Control;
830 DPRINTF(RubySlicc, "%s\n", out_msg);
831 }
832 }
833 }
834 }
835
836 action(en_enqueueNextEvict, "en", desc="Queue evict the next block in the region") {
837 // increment in_msg.addr by blockSize bytes and enqueue on triggerPort
838 // Only enqueue if the next address doesn't overrun the region bound
839 if (getRegionBase(getNextBlock(address)) == getRegionBase(address)) {
840 enqueue(triggerQueue_out, TriggerMsg, nextEvictLatency) {
841 out_msg.Type := TriggerType:InvNext;
842 out_msg.addr := getNextBlock(address);
843 }
844 } else {
845 tbe.DoneEvicting := true;
846 DPRINTF(RubySlicc, "Done evicing region %s\n", getRegionBase(address));
847 DPRINTF(RubySlicc, "Waiting for %s acks\n", tbe.OutstandingAcks);
848 if (tbe.AllAcksReceived == true) {
849 enqueue(triggerQueue_out, TriggerMsg, 1) {
850 out_msg.Type := TriggerType:AcksComplete;
851 out_msg.addr := getRegionBase(address);
852 }
853 }
854 }
855 }
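// Together, e_evictCurrent and en_enqueueNextEvict walk the region one
// block per nextEvictLatency via the trigger queue, and AcksComplete
// fires once the walk is done and the last ack has arrived.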
856
857 action(ef_enqueueFirstEvict, "ef", desc="Queue the first block in the region to be evicted") {
858 if (tbe.DoneEvicting == false) {
859 enqueue(triggerQueue_out, TriggerMsg, nextEvictLatency) {
860 out_msg.Type := TriggerType:InvNext;
861 out_msg.addr := getRegionBase(address);
862 }
863 }
864 }
865
866 action(ra_receiveAck, "ra", desc="Mark TBE entry as received this ack") {
867 DPRINTF(RubySlicc, "received ack for %s reg: %s vec: %s pos: %d\n",
868 address, getRegionBase(address), tbe.ValidBlocks, getRegionOffset(address));
869 peek(unblockNetwork_in, UnblockMsg) {
870 //
871 // Note the tbe ValidBlock vec will be a conservative list of the
872 // valid blocks since the cache entry ValidBlock vec is set on the
873 // request
874 //
875 if (in_msg.wasValid) {
876 assert(tbe.ValidBlocks.at(getRegionOffset(address)));
877 }
878 }
879 tbe.OutstandingAcks := tbe.OutstandingAcks - 1;
880 tbe.AcksReceived.at(getRegionOffset(address)) := true;
881 assert(tbe.OutstandingAcks >= 0);
882 if (tbe.OutstandingAcks == 0) {
883 tbe.AllAcksReceived := true;
884 if (tbe.DoneEvicting) {
885 enqueue(triggerQueue_out, TriggerMsg, 1) {
886 out_msg.Type := TriggerType:AcksComplete;
887 out_msg.addr := getRegionBase(address);
888 }
889 }
890 }
891
892 APPEND_TRANSITION_COMMENT(getRegionBase(address));
893 APPEND_TRANSITION_COMMENT(" Acks left receive ");
894 APPEND_TRANSITION_COMMENT(tbe.OutstandingAcks);
895 }
896
897 action(do_decrementOutstanding, "do", desc="Decrement outstanding requests") {
898 APPEND_TRANSITION_COMMENT(" decr outstanding ");
899 if (is_valid(cache_entry)) {
900 cache_entry.NumOutstandingReqs := cache_entry.NumOutstandingReqs - 1;
901 assert(cache_entry.OutstandingReqs.at(getRegionOffset(address)));
902 cache_entry.OutstandingReqs.at(getRegionOffset(address)) := false;
903 assert(cache_entry.NumOutstandingReqs >= 0);
904 assert(cache_entry.NumOutstandingReqs == countBoolVec(cache_entry.OutstandingReqs));
905 APPEND_TRANSITION_COMMENT(cache_entry.NumOutstandingReqs);
906 }
907 if (is_valid(tbe)) {
908 tbe.NumOutstandingReqs := tbe.NumOutstandingReqs - 1;
909 assert(tbe.OutstandingReqs.at(getRegionOffset(address)));
910 tbe.OutstandingReqs.at(getRegionOffset(address)) := false;
911 assert(tbe.NumOutstandingReqs >= 0);
912 assert(tbe.NumOutstandingReqs == countBoolVec(tbe.OutstandingReqs));
913 APPEND_TRANSITION_COMMENT(tbe.NumOutstandingReqs);
914 }
915 }
916
917 action(co_checkOutstanding, "co", desc="check if there are no more outstanding requests") {
918 assert(is_valid(tbe));
919 if ((tbe.NumOutstandingReqs <= tbe.OutstandingThreshold) &&
920 (tbe.AllOutstandingTriggered == false)) {
921 APPEND_TRANSITION_COMMENT(" no more outstanding: ");
922 APPEND_TRANSITION_COMMENT(tbe.NumOutstandingReqs);
923 APPEND_TRANSITION_COMMENT(tbe.OutstandingThreshold);
924 enqueue(triggerQueue_out, TriggerMsg, 1) {
925 out_msg.Type := TriggerType:AllOutstanding;
926 if (tbe.DemandRequest) {
927 out_msg.addr := tbe.DemandAddress;
928 } else {
929 out_msg.addr := getRegionBase(address);
930 }
931 DPRINTF(RubySlicc, "co enqueuing %s\n", out_msg);
932 tbe.AllOutstandingTriggered := true;
933 }
934 } else {
935 APPEND_TRANSITION_COMMENT(" still more outstanding ");
936 }
937 }
938
939 action(ro_resetAllOutstanding, "ro", desc="Reset all outstanding") {
940 tbe.AllOutstandingTriggered := false;
941 }
942
943 action(so_setOutstandingCheckOne, "so", desc="Check outstanding is waiting for 1, not 0") {
944 // Need this for S_P because one request is outstanding between here and r-dir
945 tbe.OutstandingThreshold := 1;
946 }
947
948 action(a_allocateRegionEntry, "a", desc="Allocate a new entry") {
949 set_cache_entry(cacheMemory.allocate(getRegionBase(address), new Entry));
950 cache_entry.ValidBlocks.clear();
951 cache_entry.ValidBlocks.resize(blocksPerRegion);
952 cache_entry.UsedBlocks.clear();
953 cache_entry.UsedBlocks.resize(blocksPerRegion);
954 cache_entry.dirty := false;
955 cache_entry.NumOutstandingReqs := 0;
956 cache_entry.OutstandingReqs.clear();
957 cache_entry.OutstandingReqs.resize(blocksPerRegion);
958 }
959
960 action(d_deallocateRegionEntry, "d", desc="Deallocate region entry") {
961 cacheMemory.deallocate(getRegionBase(address));
962 unset_cache_entry();
963 }
964
965 action(t_allocateTBE, "t", desc="allocate TBE Entry") {
966 check_allocate(TBEs);
967 TBEs.allocate(getRegionBase(address));
968 set_tbe(getTBE(address));
969 tbe.OutstandingAcks := 0;
970 tbe.AllAcksReceived := true; // starts true since the region could be empty
971 tbe.DoneEvicting := false;
972 tbe.AcksReceived.clear();
973 tbe.AcksReceived.resize(blocksPerRegion);
974 tbe.SendAck := false;
975 tbe.OutstandingThreshold := 0;
976 if (is_valid(cache_entry)) {
977 tbe.NumOutstandingReqs := cache_entry.NumOutstandingReqs;
978 tbe.OutstandingReqs := cache_entry.OutstandingReqs;
979 assert(tbe.NumOutstandingReqs == countBoolVec(tbe.OutstandingReqs));
980 tbe.dirty := cache_entry.dirty;
981 tbe.ValidBlocks := cache_entry.ValidBlocks;
982 tbe.OutstandingAcks := countBoolVec(tbe.ValidBlocks);
983 APPEND_TRANSITION_COMMENT(" tbe valid blocks ");
984 APPEND_TRANSITION_COMMENT(tbe.ValidBlocks);
985 APPEND_TRANSITION_COMMENT(" cache valid blocks ");
986 APPEND_TRANSITION_COMMENT(cache_entry.ValidBlocks);
987 } else {
988 tbe.dirty := false;
989 }
990 }
991
992 action(m_markSendAck, "m", desc="Mark TBE that we need to ack at end") {
993 assert(is_valid(tbe));
994 tbe.SendAck := true;
995 }
996
997 action(db_markDirtyBit, "db", desc="Mark TBE dirty bit") {
998 peek(unblockNetwork_in, UnblockMsg) {
999 if (is_valid(tbe)) {
1000 tbe.dirty := tbe.dirty || in_msg.Dirty;
1001 }
1002 }
1003 }
1004
1005 action(dr_markDoneAckReceived, "dr", desc="Mark TBE that a done ack has been received") {
1006 assert(is_valid(tbe));
1007 tbe.DoneAckReceived := true;
1008 tbe.DoneAckAddr := address;
1009 APPEND_TRANSITION_COMMENT(" marking done ack on TBE ");
1010 }
1011
1012 action(se_setTBE, "se", desc="Set msg type to evict") {
1013 peek(probeNetwork_in, NBProbeRequestMsg) {
1014 tbe.MsgType := in_msg.Type;
1015 tbe.Requestor := in_msg.Requestor;
1016 tbe.DemandAddress := in_msg.DemandAddress;
1017 tbe.DemandRequest := in_msg.DemandRequest;
1018 }
1019 }
1020
1021 action(sne_setNewTBE, "sne", desc="Set msg type to evict") {
1022 peek(probeNetwork_in, NBProbeRequestMsg) {
1023 tbe.NewMsgType := in_msg.Type;
1024 tbe.NewRequestor := in_msg.Requestor;
1025 tbe.NewDemandAddress := in_msg.DemandAddress;
1026 tbe.NewDemandRequest := in_msg.DemandRequest;
1027 }
1028 }
1029
1030 action(soe_setOldTBE, "soe", desc="Set msg type to evict") {
1031 tbe.MsgType := tbe.NewMsgType;
1032 tbe.Requestor := tbe.NewRequestor;
1033 tbe.DemandAddress := tbe.NewDemandAddress;
1034 tbe.DemandRequest := tbe.NewDemandRequest;
1035 tbe.OutstandingAcks := countBoolVec(tbe.ValidBlocks);
1036 tbe.AllAcksReceived := true; // starts true since the region could be empty
1037 tbe.DoneEvicting := false;
1038 tbe.AcksReceived.clear();
1039 tbe.AcksReceived.resize(blocksPerRegion);
1040 tbe.SendAck := false;
1041 }
1042
1043 action(ser_setTBE, "ser", desc="Set msg type to evict repl") {
1044 tbe.MsgType := ProbeRequestType:PrbInv;
1045 }
1046
1047 action(md_setMustDowngrade, "md", desc="When permissions finally get here, must be shared") {
1048 assert(is_valid(cache_entry));
1049 cache_entry.MustDowngrade := true;
1050 }
1051
1052 action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") {
1053 TBEs.deallocate(getRegionBase(address));
1054 unset_tbe();
1055 }
1056
1057 action(p_popRequestQueue, "p", desc="Pop the request queue") {
1058 requestNetwork_in.dequeue(clockEdge());
1059 }
1060
1061 action(pl_popUnblockQueue, "pl", desc="Pop the unblock queue") {
1062 unblockNetwork_in.dequeue(clockEdge());
1063 }
1064
1065 action(pn_popNotifyQueue, "pn", desc="Pop the notify queue") {
1066 notifyNetwork_in.dequeue(clockEdge());
1067 }
1068
1069 action(pp_popProbeQueue, "pp", desc="Pop the probe queue") {
1070 probeNetwork_in.dequeue(clockEdge());
1071 }
1072
1073 action(pt_popTriggerQueue, "pt", desc="Pop the trigger queue") {
1074 DPRINTF(RubySlicc, "Trigger Before Contents: %s\n", triggerQueue_in);
1075 triggerQueue_in.dequeue(clockEdge());
1076 DPRINTF(RubySlicc, "Trigger After Contents: %s\n", triggerQueue_in);
1077 }
1078
1079 // Must always use wake all, since non-region addresses wait on region addresses
1080 action(wa_wakeUpAllDependents, "wa", desc="Wake up any requests waiting for this region") {
1081 wakeUpAllBuffers();
1082 }
1083
1084 action(zz_stallAndWaitRequestQueue, "\z", desc="recycle request queue") {
1085 Addr regAddr := getRegionBase(address);
1086 DPRINTF(RubySlicc, "Stalling address %s\n", regAddr);
1087 stall_and_wait(requestNetwork_in, regAddr);
1088 }
1089
1090 action(yy_stallAndWaitProbeQueue, "\y", desc="stall probe queue") {
1091 Addr regAddr := getRegionBase(address);
1092 stall_and_wait(probeNetwork_in, regAddr);
1093 }
1094
1095 action(yyy_recycleProbeQueue, "\yy", desc="recycle probe queue") {
1096 probeNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
1097 }
1098
1099 action(zzz_recycleRequestQueue, "\zz", desc="recycle request queue") {
1100 requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
1101 }
1102
1103 action(www_recycleUnblockNetwork, "\ww", desc="recycle unblock queue") {
1104 unblockNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
1105 }
1106
1107 action(z_stall, "z", desc="stall request queue") {
1108 // fake state
1109 }
1110
1111 action(mru_setMRU, "mru", desc="set MRU") {
1112 cacheMemory.setMRU(address, cache_entry.NumValidBlocks);
1113 }
1114
1115 // Transitions
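// Typical flows: NP -> NP_PS -> P or S on first touch, once the region
// dir replies with Private/SharedNotify; S -> S_O -> SS_P -> S_P -> P on
// an upgrade; and P/S -> P_NP_O -> P_NP -> P_NP_W -> NP on replacement,
// with each _O state first draining outstanding requests.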
1116
1117 transition({NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, P_NP_W, P_NP_NP, NP_W}, {CPURead, CPUWriteback, CPUWrite}) {} {
1118 zz_stallAndWaitRequestQueue;
1119 }
1120
1121 transition(SS_P, {CPURead, CPUWriteback}) {
1122 zz_stallAndWaitRequestQueue;
1123 }
1124
1125 transition({NP, S, P, NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P, NP_W, P_NP_NP}, StallAccess) {} {
1126 zz_stallAndWaitRequestQueue;
1127 }
1128
1129 transition({S, P, NP_PS, S_P, S_NP_PS, P_NP, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P, P_NP_W, P_NP_NP, NP_W}, StallDoneAck) {
1130 www_recycleUnblockNetwork;
1131 }
1132
1133 transition(NP, StallDoneAck, NP_W) {
1134 t_allocateTBE;
1135 db_markDirtyBit;
1136 dr_markDoneAckReceived;
1137 pl_popUnblockQueue;
1138 }
1139
1140 transition(NP_W, StaleRequest, NP) {
1141 f_fwdReqToDir;
1142 dt_deallocateTBE;
1143 wa_wakeUpAllDependents;
1144 p_popRequestQueue;
1145 }
1146
1147 transition(P_NP_O, DowngradeRegion) {} {
1148 z_stall; // should stall and wait
1149 }
1150
1151 transition({NP_PS, S_NP_PS, S_P, P_S, P_NP_O, S_NP_PS_O, P_S_O, S_O, SS_P}, ReplRegion) {} {
1152 zz_stallAndWaitRequestQueue; // can't let things get out of order!
1153 }
1154
1155 transition({P_NP_O, S_O, SS_P}, InvRegion) {} {
1156 yyy_recycleProbeQueue; // can't be z_stall because a RdBlkM with the sinked flag could be in the requestQueue, blocking the inv
1157 }
1158
1159 transition(P_NP, {InvRegion, DowngradeRegion}, P_NP_NP) {} {
1160 sne_setNewTBE;
1161 pp_popProbeQueue;
1162 }
1163
1164 transition(S_P, DowngradeRegion) {} {
1165 adp_AckDemandProbe;
1166 ain_ackRegionInvNow;
1167 pp_popProbeQueue;
1168 }
1169
1170 transition(P_NP_W, InvRegion) {
1171 adp_AckDemandProbe;
1172 ain_ackRegionInvNow;
1173 pp_popProbeQueue;
1174 }
1175
1176 transition(P_NP_W, DowngradeRegion) {
1177 adp_AckDemandProbe;
1178 aine_ackRegionInvExclusiveNow;
1179 pp_popProbeQueue;
1180 }
1181
1182 transition({P, S}, {CPURead, CPUWriteback}) {TagArrayRead, TagArrayWrite} {
1183 mru_setMRU;
1184 f_fwdReqToDir;
1185 u_updateRegionEntry;
1186 p_popRequestQueue;
1187 }
1188
1189 transition(P, CPUWrite) {TagArrayRead, TagArrayWrite} {
1190 mru_setMRU;
1191 f_fwdReqToDir;
1192 u_updateRegionEntry;
1193 p_popRequestQueue;
1194 }
1195
1196 transition(S, CPUWrite, S_O) {TagArrayRead} {
1197 mru_setMRU;
1198 t_allocateTBE;
1199 co_checkOutstanding;
1200 zz_stallAndWaitRequestQueue;
1201 }
1202
1203 transition(S_O, AllOutstanding, SS_P) {
1204 wa_wakeUpAllDependents;
1205 ro_resetAllOutstanding;
1206 pt_popTriggerQueue;
1207 }
1208
1209 transition(SS_P, CPUWrite, S_P) {
1210 mru_setMRU;
1211 dt_deallocateTBE;
1212 ru_requestUpgrade;
1213 u_updateRegionEntry;
1214 p_popRequestQueue;
1215 }
1216
1217 transition(NP, {CPURead, CPUWriteback}, NP_PS) {TagArrayRead, TagArrayWrite} {
1218 a_allocateRegionEntry;
1219 rs_requestShared;
1220 u_updateRegionEntry;
1221 p_popRequestQueue;//zz_stallAndWaitRequestQueue;
1222 }
1223
1224 transition(NP, CPUWrite, NP_PS) {TagArrayRead, TagArrayWrite} {
1225 a_allocateRegionEntry;
1226 rp_requestPrivate;
1227 u_updateRegionEntry;
1228 p_popRequestQueue;//zz_stallAndWaitRequestQueue;
1229 }
1230
1231 transition(NP_PS, PrivateNotify, P) {} {
1232 ap_ackPrivateNotify;
1233 wa_wakeUpAllDependents;
1234 pn_popNotifyQueue;
1235 }
1236
1237 transition(S_P, PrivateNotify, P) {} {
1238 ap_ackPrivateNotify;
1239 wa_wakeUpAllDependents;
1240 pn_popNotifyQueue;
1241 }
1242
1243 transition(NP_PS, SharedNotify, S) {} {
1244 ap_ackPrivateNotify;
1245 wa_wakeUpAllDependents;
1246 pn_popNotifyQueue;
1247 }
1248
1249 transition(P_NP_W, WbNotify, NP) {} {
1250 aw_ackWbNotify;
1251 wa_wakeUpAllDependents;
1252 dt_deallocateTBE;
1253 pn_popNotifyQueue;
1254 }
1255
1256 transition({P, S}, ReplRegion, P_NP_O) {TagArrayRead, TagArrayWrite} {
1257 t_allocateTBE;
1258 ser_setTBE;
1259 d_deallocateRegionEntry;
1260 co_checkOutstanding;
1261 }
1262
1263 transition({P, S}, InvRegion, P_NP_O) {TagArrayRead, TagArrayWrite} {
1264 t_allocateTBE;
1265 se_setTBE;
1266 m_markSendAck;
1267 d_deallocateRegionEntry;
1268 co_checkOutstanding;
1269 pp_popProbeQueue;
1270 }
1271
1272 transition(P_NP_O, AllOutstanding, P_NP) {} {
1273 ed_evictDemand;
1274 ef_enqueueFirstEvict;
1275 ro_resetAllOutstanding;
1276 pt_popTriggerQueue;
1277 }
1278
1279 transition(S_P, InvRegion, S_NP_PS_O) {TagArrayRead} {
1280 t_allocateTBE;
1281 se_setTBE;
1282 m_markSendAck;
1283 so_setOutstandingCheckOne;
1284 co_checkOutstanding;
1285 pp_popProbeQueue;
1286 }
1287
1288 transition(S_NP_PS_O, AllOutstanding, S_NP_PS) {
1289 ed_evictDemand;
1290 ef_enqueueFirstEvict;
1291 ro_resetAllOutstanding;
1292 pt_popTriggerQueue;
1293 }
1294
1295 transition(P, DowngradeRegion, P_S_O) {TagArrayRead, TagArrayWrite} {
1296 t_allocateTBE;
1297 se_setTBE;
1298 m_markSendAck;
1299 co_checkOutstanding;
1300 pp_popProbeQueue;
1301 }
1302
1303 transition(P_S_O, AllOutstanding, P_S) {} {
1304 ed_evictDemand;
1305 ef_enqueueFirstEvict;
1306 ro_resetAllOutstanding;
1307 pt_popTriggerQueue;
1308 }
1309
1310 transition({P, S}, DoneAck) {TagArrayWrite} {
1311 do_decrementOutstanding;
1312 wa_wakeUpAllDependents;
1313 db_markDirtyBit;
1314 uw_updatePossibleWriteback;
1315 pl_popUnblockQueue;
1316 }
1317
1318 transition({S_P, NP_PS, S_NP_PS}, DoneAck) {TagArrayWrite} {
1319 www_recycleUnblockNetwork;
1320 }
1321
1322 transition({P_NP_O, S_NP_PS_O, P_S_O, S_O}, DoneAck) {} {
1323 do_decrementOutstanding;
1324 co_checkOutstanding;
1325 db_markDirtyBit;
1326 uw_updatePossibleWriteback;
1327 pl_popUnblockQueue;
1328 }
1329
1330 transition({P_NP, P_S, S_NP_PS, P_NP_NP}, Evict) {} {
1331 e_evictCurrent;
1332 en_enqueueNextEvict;
1333 pt_popTriggerQueue;
1334 }
1335
1336 transition({P_NP, P_S, S_NP_PS, P_NP_NP}, InvAck) {} {
1337 ra_receiveAck;
1338 db_markDirtyBit;
1339 pl_popUnblockQueue;
1340 }
1341
1342 transition(P_NP, LastAck_CleanWb, P_NP_W) {} {
1343 rw_requestWriteback;
1344 pt_popTriggerQueue;
1345 }
1346
1347 transition(P_NP_NP, LastAck_CleanWb, P_NP) {} {
1348 soe_setOldTBE;
1349 m_markSendAck;
1350 ed_evictDemand;
1351 ef_enqueueFirstEvict;
1352 pt_popTriggerQueue;
1353 }
1354
1355 transition(P_NP, LastAck_PrbResp, NP) {} {
1356 aie_ackRegionExclusiveInv;
1357 dt_deallocateTBE;
1358 wa_wakeUpAllDependents;
1359 pt_popTriggerQueue;
1360 }
1361
1362 transition(S_NP_PS, LastAck_PrbResp, NP_PS) {} {
1363 aie_ackRegionExclusiveInv;
1364 dt_deallocateTBE;
1365 wa_wakeUpAllDependents;
1366 pt_popTriggerQueue;
1367 }
1368
1369 transition(P_S, LastAck_PrbResp, S) {} {
1370 ai_ackRegionInv;
1371 ad_ackDirectory;
1372 dt_deallocateTBE;
1373 wa_wakeUpAllDependents;
1374 pt_popTriggerQueue;
1375 }
1376
1377 }
1378