// src/mem/protocol/MESI_Two_Level-L1cache.sm (gem5)
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
 : Sequencer * sequencer;
   CacheMemory * L1Icache;
   CacheMemory * L1Dcache;
   Prefetcher * prefetcher;
   int l2_select_num_bits;
   Cycles l1_request_latency := 2;
   Cycles l1_response_latency := 2;
   Cycles to_l2_latency := 1;
   bool send_evictions;
   bool enable_prefetch := "False";

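   // Editorial note (hedged): l2_select_num_bits, together with
   // l2_select_low_bit declared further below (defaulting to the block-size
   // bits), is what the mapAddressToRange() calls in the actions use to pick
   // the destination L2 bank for an address. In the stock gem5 Ruby configs
   // this is typically derived from the number of L2 banks, but the exact
   // value depends on the Python configuration in use.
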
   // Message Queues
   // From this node's L1 cache TO the network

   // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
   MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
        vnet_type="request";

   // a local L1 -> this L2 bank
   MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
        vnet_type="response";

   MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
        vnet_type="unblock";


   // To this node's L1 cache FROM the network
   // an L2 bank -> this L1
   MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
        vnet_type="request";

   // an L2 bank -> this L1
   MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
        vnet_type="response";

   // Request buffer for prefetches
   MessageBuffer * optionalQueue;

   // Buffer for requests generated by the processor core.
   MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not present in either cache";
    I, AccessPermission:Invalid, desc="an L1 cache entry Idle";
    S, AccessPermission:Read_Only, desc="an L1 cache entry Shared";
    E, AccessPermission:Read_Only, desc="an L1 cache entry Exclusive";
    M, AccessPermission:Read_Write, desc="an L1 cache entry Modified", format="!b";

    // Transient States
    IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
    IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
    SM, AccessPermission:Read_Only, desc="L1 shared, issued GETX/UPGRADE, waiting for data and acks";
    IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";

    M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
    SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";

    // Transient States in which block is being prefetched
    PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
    PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
    PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
    PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
  }
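
  // The PF_* states mirror their demand counterparts (IS, IM, SM, IS_I) but
  // are entered for prefetcher-generated requests; a later demand access to
  // a block in a PF_* state converts it back into the matching demand
  // transient state (see the PF_IS/PF_IM/PF_SM transitions below).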

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L1 events
    Load, desc="Load request from the home processor";
    Ifetch, desc="I-fetch request from the home processor";
    Store, desc="Store request from the home processor";

    Inv, desc="Invalidate request from L2 bank";

    // internal generated request
    L1_Replacement, desc="L1 Replacement", format="!r";

    // other requests
    Fwd_GETX, desc="GETX from other processor";
    Fwd_GETS, desc="GETS from other processor";
    Fwd_GET_INSTR, desc="GET_INSTR from other processor";

    Data, desc="Data for processor";
    Data_Exclusive, desc="Exclusive data for processor";
    DataS_fromL1, desc="data for GETS request, need to unblock directory";
    Data_all_Acks, desc="Data for processor, all acks";

    Ack, desc="Ack for processor";
    Ack_all, desc="Last ack for processor";

    WB_Ack, desc="Ack for replacement";

    PF_Load, desc="load request from prefetcher";
    PF_Ifetch, desc="instruction fetch request from prefetcher";
    PF_Store, desc="exclusive load request from prefetcher";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState, desc="cache state";
    DataBlock DataBlk, desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
    bool isPrefetch, desc="Set if this block was prefetched";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool Dirty, default="false", desc="data is dirty";
    bool isPrefetch, desc="Set if this was caused by a prefetch";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";

  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);
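
  // The prototypes above (clockEdge, set_cache_entry, set_tbe, wakeUpBuffers,
  // profileMsgDelay, etc.) are helpers supplied by the SLICC-generated
  // controller and its AbstractController base class; they are declared
  // here but not defined in this file.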

  // inclusive cache returns L1 entries only
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
    if(is_valid(L1Dcache_entry)) {
      return L1Dcache_entry;
    }

    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
    return L1Icache_entry;
  }

  Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
    return L1Dcache_entry;
  }

  Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
    return L1Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:NP;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);

    // MUST CHANGE
    if(is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
          testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  Event prefetch_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:PF_Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:PF_Ifetch;
    } else if ((type == RubyRequestType:ST) ||
               (type == RubyRequestType:ATOMIC)) {
      return Event:PF_Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

  out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
  out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
  out_port(optionalQueue_out, RubyRequest, optionalQueue);
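
  // Each out_port binds a typed port name to one of the MessageBuffer
  // parameters declared above; the enqueue() calls in the actions below send
  // messages through these ports after the configured latency.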


  // Prefetch queue between the controller and the prefetcher
  // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
  // implemented as a LIFO structure. The structure would allow for fast
  // searches of all entries in the queue, not just the head msg. All
  // msgs in the structure can be invalidated if a demand miss matches.
  in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
    if (optionalQueue_in.isReady(clockEdge())) {
      peek(optionalQueue_in, RubyRequest) {
        // Instruction Prefetch
        if (in_msg.Type == RubyRequestType:IFETCH) {
          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The block to be prefetched is already present in the
            // cache. We should drop this request.
            trigger(prefetch_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress,
                    L1Icache_entry, TBEs[in_msg.LineAddress]);
          }

          // Check to see if it is in the OTHER L1
          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The block is in the wrong L1 cache. We should drop
            // this request.
            trigger(prefetch_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress,
                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
          }

          if (L1Icache.cacheAvail(in_msg.LineAddress)) {
            // The L1 doesn't have the line, but we have space for it
            // in the L1, so let's see if the L2 has it
            trigger(prefetch_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress,
                    L1Icache_entry, TBEs[in_msg.LineAddress]);
          } else {
            // No room in the L1, so we need to make room in the L1
            trigger(Event:L1_Replacement,
                    L1Icache.cacheProbe(in_msg.LineAddress),
                    getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
                    TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
          }
        } else {
          // Data prefetch
          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The block to be prefetched is already present in the
            // cache. We should drop this request.
            trigger(prefetch_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress,
                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
          }

          // Check to see if it is in the OTHER L1
          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The block is in the wrong L1. Just drop the prefetch
            // request.
            trigger(prefetch_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress,
                    L1Icache_entry, TBEs[in_msg.LineAddress]);
          }

          if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
            // The L1 doesn't have the line, but we have space for it
            // in the L1, so let's see if the L2 has it
            trigger(prefetch_request_type_to_event(in_msg.Type),
                    in_msg.LineAddress,
                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {
            // No room in the L1, so we need to make room in the L1
            trigger(Event:L1_Replacement,
                    L1Dcache.cacheProbe(in_msg.LineAddress),
                    getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
                    TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
          }
        }
      }
    }
  }

  // Response L1 Network - response msg to this L1 cache
  in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
    if (responseL1Network_in.isReady(clockEdge())) {
      peek(responseL1Network_in, ResponseMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if(in_msg.Type == CoherenceResponseType:DATA) {
          if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
               getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
               getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
              machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {

            trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);

          } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
            trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Data, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
            trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
          } else {
            trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
          }
        } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid L1 response type");
        }
      }
    }
  }

  // Request Network - requests forwarded to this L1 cache from the shared L2
  in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
    if(requestL1Network_in.isReady(clockEdge())) {
      peek(requestL1Network_in, RequestMsg, block_on="addr") {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETX ||
                   in_msg.Type == CoherenceRequestType:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

  // Mandatory queue between the node's CPU and its L1 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // *** INSTRUCTION ACCESS ***

          Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
          if (is_valid(L1Icache_entry)) {
            // The tag matches in the L1I, so handle the fetch locally.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    L1Icache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
            if (is_valid(L1Dcache_entry)) {
              // The block is in the wrong L1; replace it there first and
              // retry this request afterwards.
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
            }

            if (L1Icache.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it
              // in the L1, so let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
                      getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {

          // *** DATA ACCESS ***
          Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
          if (is_valid(L1Dcache_entry)) {
            // The tag matches in the L1D, so handle the access locally.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    L1Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L1
            Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
            if (is_valid(L1Icache_entry)) {
              // The block is in the wrong L1; replace it there first and
              // retry this request afterwards.
              trigger(Event:L1_Replacement, in_msg.LineAddress,
                      L1Icache_entry, TBEs[in_msg.LineAddress]);
            }

            if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
              // The L1 doesn't have the line, but we have space for it
              // in the L1, so let's see if the L2 has it.
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      L1Dcache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L1, so we need to make room in the L1
              trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
                      getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
                      TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }

  void enqueuePrefetch(Addr address, RubyRequestType type) {
    enqueue(optionalQueue_out, RubyRequest, 1) {
      out_msg.LineAddress := address;
      out_msg.Type := type;
      out_msg.AccessMode := RubyAccessMode:Supervisor;
    }
  }
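
  // As far as this file is concerned, enqueuePrefetch() is the hook the
  // attached Prefetcher object uses (through the generated controller) to
  // inject prefetch requests; requests placed here surface on
  // optionalQueue_in above.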

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
    peek(optionalQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETS;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pai_issuePfGETINSTR, "pai",
         desc="Issue GETINSTR for prefetch request") {
    peek(optionalQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GET_INSTR;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(
            mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
    peek(optionalQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GETX;
        out_msg.Requestor := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);

        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:UPGRADE;
        out_msg.Requestor := machineID;
        out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                l2_select_low_bit, l2_select_num_bits, intToID(0)));
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Destination);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(d_sendDataToRequestor, "d", desc="send data to requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        assert(is_valid(tbe));
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.Dirty := tbe.Dirty;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
  }

  action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
    enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
      assert(is_valid(tbe));
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA;
      out_msg.DataBlk := tbe.DataBlk;
      out_msg.Dirty := tbe.Dirty;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
  }

  action(fi_sendInvAck, "fi", desc="send invalidation ack to the requestor") {
    peek(requestL1Network_in, RequestMsg) {
      enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.MessageSize := MessageSizeType:Response_Control;
        out_msg.AckCount := 1;
      }
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(g_issuePUTX, "g", desc="issue PUTX (writeback) to the L2 cache") {
    enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }

  action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
    enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                              l2_select_low_bit, l2_select_num_bits, intToID(0)));
      out_msg.MessageSize := MessageSizeType:Response_Control;
      DPRINTF(RubySlicc, "%#x\n", address);
    }
  }

  action(dg_invalidate_sc, "dg",
         desc="Invalidate store conditional as the cache lost permissions") {
    sequencer.invalidateSC(address);
  }

  action(h_load_hit, "hd",
         desc="Notify sequencer the load completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }
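
  // The "x" hit variants (hx_load_hit above, hhx_store_hit below) are used
  // on the fill path, i.e. when the block has just arrived from the network:
  // they update MRU state in both L1 arrays and pass "true" as the third
  // argument to the sequencer callback, which in the gem5 Sequencer
  // interface marks the access as satisfied externally rather than by a
  // plain L1 hit.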

  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
  {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    L1Icache.setMRU(address);
    L1Dcache.setMRU(address);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.isPrefetch := false;
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popRequestQueue, "l",
         desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := requestL1Network_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popIncomingResponseQueue, "o",
         desc="Pop Incoming Response queue and profile the delay within this virtual network") {
    Tick delay := responseL1Network_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
    peek(responseL1Network_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      cache_entry.Dirty := in_msg.Dirty;
    }
  }

  action(q_updateAckCount, "q", desc="Update ack count") {
    peek(responseL1Network_in, ResponseMsg) {
      assert(is_valid(tbe));
      tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
      APPEND_TRANSITION_COMMENT(in_msg.AckCount);
      APPEND_TRANSITION_COMMENT(" p: ");
      APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
    }
  }
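
  // APPEND_TRANSITION_COMMENT only annotates the ProtocolTrace debug output
  // for this transition (here, the received and remaining ack counts); it
  // has no effect on protocol behaviour.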

  action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    if (L1Dcache.isTagPresent(address)) {
      L1Dcache.deallocate(address);
    } else {
      L1Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Dcache.allocate(address, new Entry));
    }
  }

  action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(L1Icache.allocate(address, new Entry));
    }
  }

  action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }
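
  // stall_and_wait() parks the head message on a per-address wait list
  // instead of leaving it to be re-polled every cycle; kd_wakeUpDependents
  // (via wakeUpBuffers) moves any parked messages for this address back into
  // their queues once the block leaves its transient state.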

  action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
    ++L1Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
    ++L1Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
    ++L1Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
    ++L1Dcache.demand_hits;
  }

  action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      if (enable_prefetch) {
        prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
      }
    }
  }

  action(ppm_observePfMiss, "\ppm",
         desc="Inform the prefetcher about the partial miss") {
    peek(mandatoryQueue_in, RubyRequest) {
      prefetcher.observePfMiss(in_msg.LineAddress);
    }
  }

  action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
    optionalQueue_in.dequeue(clockEdge());
  }

  action(mp_markPrefetched, "mp", desc="Mark the cache entry as prefetched") {
    assert(is_valid(cache_entry));
    cache_entry.isPrefetch := true;
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************

  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }
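
  // Demand accesses and replacements that hit a block in a transient state
  // are parked with stall_and_wait and replayed once the completing
  // transition runs kd_wakeUpDependents. The complementary demand accesses
  // (e.g. a Load in PF_IS, a Store in PF_IM) are not stalled here; they
  // convert the prefetch back into a demand request (see the PF_*
  // transitions below).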

  // Transitions from Idle
  transition({NP,I}, L1_Replacement) {
    ff_deallocateL1CacheBlock;
  }

  transition({S,E,M,IS,IM,SM,IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
             {PF_Load, PF_Store, PF_Ifetch}) {
    pq_popPrefetchQueue;
  }
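
  // Prefetch hints for blocks that are already present, or that already have
  // a demand or prefetch request in flight, are simply dropped here.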

  transition({NP,I}, Load, IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Load, PF_IS) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    pa_issuePfGETS;
    pq_popPrefetchQueue;
  }

  transition(PF_IS, Load, IS) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_IS_I, Load, IS_I) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Ifetch, IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    ai_issueGETINSTR;
    uu_profileInstMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Ifetch, PF_IS) {
    pp_allocateL1ICacheBlock;
    i_allocateTBE;
    pai_issuePfGETINSTR;
    pq_popPrefetchQueue;
  }

  // We proactively assume that the prefetch went into the
  // instruction cache
  transition(PF_IS, Ifetch, IS) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, Store, IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    po_observeMiss;
    k_popMandatoryQueue;
  }

  transition({NP,I}, PF_Store, PF_IM) {
    oo_allocateL1DCacheBlock;
    i_allocateTBE;
    pb_issuePfGETX;
    pq_popPrefetchQueue;
  }

  transition(PF_IM, Store, IM) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition(PF_SM, Store, SM) {
    uu_profileDataMiss;
    ppm_observePfMiss;
    k_popMandatoryQueue;
  }

  transition({NP, I}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }
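
  // Even when the block is no longer present, an incoming Inv must still be
  // acknowledged (AckCount of 1) so the requester can collect its full ack
  // count.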

  // Transitions from Shared
  transition({S,E,M}, Load) {
    h_load_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition({S,E,M}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(S, L1_Replacement, I) {
    forward_eviction_to_cpu;
    ff_deallocateL1CacheBlock;
  }

  transition(S, Inv, I) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Exclusive

  transition({E,M}, Store, M) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(E, L1_Replacement, M_I) {
    // silent E replacement??
    forward_eviction_to_cpu;
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(E, Inv, I) {
    // don't send data
    forward_eviction_to_cpu;
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(E, Fwd_GETX, I) {
    forward_eviction_to_cpu;
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  // Transitions from Modified

  transition(M, L1_Replacement, M_I) {
    forward_eviction_to_cpu;
    i_allocateTBE;
    g_issuePUTX;   // send data, but hold in case forwarded request
    ff_deallocateL1CacheBlock;
  }

  transition(M_I, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(M, Inv, I) {
    forward_eviction_to_cpu;
    f_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Inv, SINK_WB_ACK) {
    ft_sendDataToL2_fromTBE;
    l_popRequestQueue;
  }

  transition(M, Fwd_GETX, I) {
    forward_eviction_to_cpu;
    d_sendDataToRequestor;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    d_sendDataToRequestor;
    d2_sendDataToL2;
    l_popRequestQueue;
  }

  transition(M_I, Fwd_GETX, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    l_popRequestQueue;
  }

  transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
    dt_sendDataToRequestor_fromTBE;
    d2t_sendDataToL2_fromTBE;
    l_popRequestQueue;
  }
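
  // Writeback race handling: while a PUTX is outstanding (M_I), any Inv or
  // forwarded request is answered from the data held in the TBE, and the
  // controller moves to SINK_WB_ACK solely to absorb the eventual WB_Ack
  // from the L2.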

  // Transitions from IS
  transition({IS, IS_I}, Inv, IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, Data_all_Acks, S) {
    u_writeDataToL1Cache;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, Data_all_Acks, I) {
    u_writeDataToL1Cache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS_I, Data_all_Acks, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, DataS_fromL1, S) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS_I, DataS_fromL1, I) {
    u_writeDataToL1Cache;
    j_sendUnblock;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS_I, DataS_fromL1, I) {
    j_sendUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition(IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    hx_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // directory is blocked when sending exclusive data
  transition(PF_IS_I, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    hx_load_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IS, Data_Exclusive, E) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // Transitions from IM
  transition(IM, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition({PF_IM, PF_SM}, Inv, PF_IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(IM, Data, SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(PF_IM, Data, PF_SM) {
    u_writeDataToL1Cache;
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    hhx_store_hit;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_IM, Data_all_Acks, M) {
    u_writeDataToL1Cache;
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  // transitions from SM
  transition(SM, Inv, IM) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    dg_invalidate_sc;
    l_popRequestQueue;
  }

  transition({SM, IM, PF_SM, PF_IM}, Ack) {
    q_updateAckCount;
    o_popIncomingResponseQueue;
  }

  transition(SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    hhx_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(PF_SM, Ack_all, M) {
    jj_sendExclusiveUnblock;
    s_deallocateTBE;
    mp_markPrefetched;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(SINK_WB_ACK, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(SINK_WB_ACK, WB_Ack, I) {
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}