src/mem/protocol/MESI_Two_Level-L1cache.sm
1 /*
2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
30 : Sequencer * sequencer;
31 CacheMemory * L1Icache;
32 CacheMemory * L1Dcache;
33 Prefetcher * prefetcher;
34 int l2_select_num_bits;
35 Cycles l1_request_latency := 2;
36 Cycles l1_response_latency := 2;
37 Cycles to_l2_latency := 1;
38 bool send_evictions;
39 bool enable_prefetch := "False";
40
41 // Message Queues
42 // From this node's L1 cache TO the network
43
44 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
45 MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
46 vnet_type="request";
47
48 // a local L1 -> this L2 bank
49 MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
50 vnet_type="response";
51
52 MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
53 vnet_type="unblock";
54
55
56 // To this node's L1 cache FROM the network
57 // a L2 bank -> this L1
58 MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
59 vnet_type="request";
60
61 // a L2 bank -> this L1
62 MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
63 vnet_type="response";
64
65 // Request Buffer for prefetches
66 MessageBuffer * optionalQueue;
67
68 // Buffer for requests generated by the processor core.
69 MessageBuffer * mandatoryQueue;
70 {
71 // STATES
72 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
73 // Base states
74 NP, AccessPermission:Invalid, desc="Not present in either cache";
75 I, AccessPermission:Invalid, desc="a L1 cache entry Idle";
76 S, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
77 E, AccessPermission:Read_Only, desc="a L1 cache entry Exclusive";
78 M, AccessPermission:Read_Write, desc="a L1 cache entry Modified", format="!b";
79
80 // Transient States
81 IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
82 IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
 83     SM, AccessPermission:Read_Only, desc="L1 shared, issued GETX, have not seen response yet";
84 IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
85
86 M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
87 SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
88
89 // Transient States in which block is being prefetched
90 PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
91 PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
92 PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
 93     PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
94 }
95
96 // EVENTS
97 enumeration(Event, desc="Cache events") {
98 // L1 events
99 Load, desc="Load request from the home processor";
100 Ifetch, desc="I-fetch request from the home processor";
101 Store, desc="Store request from the home processor";
102
103 Inv, desc="Invalidate request from L2 bank";
104
105 // internal generated request
106 L1_Replacement, desc="L1 Replacement", format="!r";
107 PF_L1_Replacement, desc="Prefetch L1 Replacement", format="!pr";
108
109 // other requests
110 Fwd_GETX, desc="GETX from other processor";
111 Fwd_GETS, desc="GETS from other processor";
112 Fwd_GET_INSTR, desc="GET_INSTR from other processor";
113
114 Data, desc="Data for processor";
 115     Data_Exclusive,     desc="Exclusive data for processor";
116 DataS_fromL1, desc="data for GETS request, need to unblock directory";
117 Data_all_Acks, desc="Data for processor, all acks";
118
119 Ack, desc="Ack for processor";
120 Ack_all, desc="Last ack for processor";
121
122 WB_Ack, desc="Ack for replacement";
123
124 PF_Load, desc="load request from prefetcher";
125 PF_Ifetch, desc="instruction fetch request from prefetcher";
126 PF_Store, desc="exclusive load request from prefetcher";
127 }
128
129 // TYPES
130
131 // CacheEntry
132 structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
133 State CacheState, desc="cache state";
134 DataBlock DataBlk, desc="data for the block";
135 bool Dirty, default="false", desc="data is dirty";
136 bool isPrefetch, desc="Set if this block was prefetched and not yet accessed";
137 }
138
139 // TBE fields
140 structure(TBE, desc="...") {
141 Addr addr, desc="Physical address for this TBE";
142 State TBEState, desc="Transient state";
143 DataBlock DataBlk, desc="Buffer for the data block";
144 bool Dirty, default="false", desc="data is dirty";
145 bool isPrefetch, desc="Set if this was caused by a prefetch";
146 int pendingAcks, default="0", desc="number of pending acks";
147 }
148
149 structure(TBETable, external="yes") {
150 TBE lookup(Addr);
151 void allocate(Addr);
152 void deallocate(Addr);
153 bool isPresent(Addr);
154 }
155
156 TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
157
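  // Lowest address bit used to select the destination L2 bank; defaults to the block offset width.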
158 int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
159
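  // Prototypes for helper functions provided outside this file by the generated controller.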
160 Tick clockEdge();
161 Cycles ticksToCycles(Tick t);
162 void set_cache_entry(AbstractCacheEntry a);
163 void unset_cache_entry();
164 void set_tbe(TBE a);
165 void unset_tbe();
166 void wakeUpBuffers(Addr a);
167 void profileMsgDelay(int virtualNetworkType, Cycles c);
168
169 // inclusive cache returns L1 entries only
170 Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
171 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
172 if(is_valid(L1Dcache_entry)) {
173 return L1Dcache_entry;
174 }
175
176 Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
177 return L1Icache_entry;
178 }
179
180 Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
181 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
182 return L1Dcache_entry;
183 }
184
185 Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
186 Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
187 return L1Icache_entry;
188 }
189
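  // A valid TBE's transient state takes precedence over the cache entry's state;
  // blocks present in neither default to NP (Not Present).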
190 State getState(TBE tbe, Entry cache_entry, Addr addr) {
191 assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
192
193 if(is_valid(tbe)) {
194 return tbe.TBEState;
195 } else if (is_valid(cache_entry)) {
196 return cache_entry.CacheState;
197 }
198 return State:NP;
199 }
200
201 void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
202 assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
203
204 // MUST CHANGE
205 if(is_valid(tbe)) {
206 tbe.TBEState := state;
207 }
208
209 if (is_valid(cache_entry)) {
210 cache_entry.CacheState := state;
211 }
212 }
213
214 AccessPermission getAccessPermission(Addr addr) {
215 TBE tbe := TBEs[addr];
216 if(is_valid(tbe)) {
217 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
218 return L1Cache_State_to_permission(tbe.TBEState);
219 }
220
221 Entry cache_entry := getCacheEntry(addr);
222 if(is_valid(cache_entry)) {
223 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
224 return L1Cache_State_to_permission(cache_entry.CacheState);
225 }
226
227 DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
228 return AccessPermission:NotPresent;
229 }
230
231 void functionalRead(Addr addr, Packet *pkt) {
232 TBE tbe := TBEs[addr];
233 if(is_valid(tbe)) {
234 testAndRead(addr, tbe.DataBlk, pkt);
235 } else {
236 testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
237 }
238 }
239
240 int functionalWrite(Addr addr, Packet *pkt) {
241 int num_functional_writes := 0;
242
243 TBE tbe := TBEs[addr];
244 if(is_valid(tbe)) {
245 num_functional_writes := num_functional_writes +
246 testAndWrite(addr, tbe.DataBlk, pkt);
247 return num_functional_writes;
248 }
249
250 num_functional_writes := num_functional_writes +
251 testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
252 return num_functional_writes;
253 }
254
255 void setAccessPermission(Entry cache_entry, Addr addr, State state) {
256 if (is_valid(cache_entry)) {
257 cache_entry.changePermission(L1Cache_State_to_permission(state));
258 }
259 }
260
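  // Map demand request types from the sequencer (and, below, prefetch request types)
  // to L1 events; atomics are treated as stores.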
261 Event mandatory_request_type_to_event(RubyRequestType type) {
262 if (type == RubyRequestType:LD) {
263 return Event:Load;
264 } else if (type == RubyRequestType:IFETCH) {
265 return Event:Ifetch;
266 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
267 return Event:Store;
268 } else {
269 error("Invalid RubyRequestType");
270 }
271 }
272
273 Event prefetch_request_type_to_event(RubyRequestType type) {
274 if (type == RubyRequestType:LD) {
275 return Event:PF_Load;
276 } else if (type == RubyRequestType:IFETCH) {
277 return Event:PF_Ifetch;
278 } else if ((type == RubyRequestType:ST) ||
279 (type == RubyRequestType:ATOMIC)) {
280 return Event:PF_Store;
281 } else {
282 error("Invalid RubyRequestType");
283 }
284 }
285
286 int getPendingAcks(TBE tbe) {
287 return tbe.pendingAcks;
288 }
289
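  // Out ports: requests, responses, and unblocks from this L1 toward the network,
  // plus the optional queue that feeds prefetch requests back into this controller.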
290 out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
291 out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
292 out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
293 out_port(optionalQueue_out, RubyRequest, optionalQueue);
294
295
296 // Prefetch queue between the controller and the prefetcher
297 // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
298 // implemented as a LIFO structure. The structure would allow for fast
299 // searches of all entries in the queue, not just the head msg. All
300 // msgs in the structure can be invalidated if a demand miss matches.
301 in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
302 if (optionalQueue_in.isReady(clockEdge())) {
303 peek(optionalQueue_in, RubyRequest) {
304 // Instruction Prefetch
305 if (in_msg.Type == RubyRequestType:IFETCH) {
306 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
307 if (is_valid(L1Icache_entry)) {
308 // The block to be prefetched is already present in the
309 // cache. We should drop this request.
310 trigger(prefetch_request_type_to_event(in_msg.Type),
311 in_msg.LineAddress,
312 L1Icache_entry, TBEs[in_msg.LineAddress]);
313 }
314
315 // Check to see if it is in the OTHER L1
316 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
317 if (is_valid(L1Dcache_entry)) {
318 // The block is in the wrong L1 cache. We should drop
319 // this request.
320 trigger(prefetch_request_type_to_event(in_msg.Type),
321 in_msg.LineAddress,
322 L1Dcache_entry, TBEs[in_msg.LineAddress]);
323 }
324
325 if (L1Icache.cacheAvail(in_msg.LineAddress)) {
 326           // L1 doesn't have the line, but we have space for it
 327           // in the L1, so let's see if the L2 has it
328 trigger(prefetch_request_type_to_event(in_msg.Type),
329 in_msg.LineAddress,
330 L1Icache_entry, TBEs[in_msg.LineAddress]);
331 } else {
332 // No room in the L1, so we need to make room in the L1
333 trigger(Event:PF_L1_Replacement,
334 L1Icache.cacheProbe(in_msg.LineAddress),
335 getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
336 TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
337 }
338 } else {
339 // Data prefetch
340 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
341 if (is_valid(L1Dcache_entry)) {
342 // The block to be prefetched is already present in the
343 // cache. We should drop this request.
344 trigger(prefetch_request_type_to_event(in_msg.Type),
345 in_msg.LineAddress,
346 L1Dcache_entry, TBEs[in_msg.LineAddress]);
347 }
348
349 // Check to see if it is in the OTHER L1
350 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
351 if (is_valid(L1Icache_entry)) {
352 // The block is in the wrong L1. Just drop the prefetch
353 // request.
354 trigger(prefetch_request_type_to_event(in_msg.Type),
355 in_msg.LineAddress,
356 L1Icache_entry, TBEs[in_msg.LineAddress]);
357 }
358
359 if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
 360           // L1 doesn't have the line, but we have space for it in
 361           // the L1, so let's see if the L2 has it
362 trigger(prefetch_request_type_to_event(in_msg.Type),
363 in_msg.LineAddress,
364 L1Dcache_entry, TBEs[in_msg.LineAddress]);
365 } else {
366 // No room in the L1, so we need to make room in the L1
367 trigger(Event:PF_L1_Replacement,
368 L1Dcache.cacheProbe(in_msg.LineAddress),
369 getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
370 TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
371 }
372 }
373 }
374 }
375 }
376
377 // Response L1 Network - response msg to this L1 cache
378 in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
379 if (responseL1Network_in.isReady(clockEdge())) {
380 peek(responseL1Network_in, ResponseMsg, block_on="addr") {
381 assert(in_msg.Destination.isElement(machineID));
382
383 Entry cache_entry := getCacheEntry(in_msg.addr);
384 TBE tbe := TBEs[in_msg.addr];
385
386 if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
387 trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
388 } else if(in_msg.Type == CoherenceResponseType:DATA) {
389 if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
390 getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
391 getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
392 getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
393 machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
394
395 trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);
396
397 } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
398 trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
399 } else {
400 trigger(Event:Data, in_msg.addr, cache_entry, tbe);
401 }
402 } else if (in_msg.Type == CoherenceResponseType:ACK) {
403 if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
404 trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
405 } else {
406 trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
407 }
408 } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
409 trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
410 } else {
411 error("Invalid L1 response type");
412 }
413 }
414 }
415 }
416
 417   // Request network - requests forwarded to this L1 cache from the shared L2
418 in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
419 if(requestL1Network_in.isReady(clockEdge())) {
420 peek(requestL1Network_in, RequestMsg, block_on="addr") {
421 assert(in_msg.Destination.isElement(machineID));
422
423 Entry cache_entry := getCacheEntry(in_msg.addr);
424 TBE tbe := TBEs[in_msg.addr];
425
426 if (in_msg.Type == CoherenceRequestType:INV) {
427 trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
428 } else if (in_msg.Type == CoherenceRequestType:GETX ||
429 in_msg.Type == CoherenceRequestType:UPGRADE) {
430 // upgrade transforms to GETX due to race
431 trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
432 } else if (in_msg.Type == CoherenceRequestType:GETS) {
433 trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
434 } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
435 trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
436 } else {
437 error("Invalid forwarded request type");
438 }
439 }
440 }
441 }
442
 443   // Mandatory queue between the node's CPU and its L1 caches
444 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
445 if (mandatoryQueue_in.isReady(clockEdge())) {
446 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
447
 448        // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
449
450 if (in_msg.Type == RubyRequestType:IFETCH) {
 451           // *** INSTRUCTION ACCESS ***
452
453 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
454 if (is_valid(L1Icache_entry)) {
 455             // The tag matches in the I-cache, so trigger the request for this block.
456 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
457 L1Icache_entry, TBEs[in_msg.LineAddress]);
458 } else {
459
460 // Check to see if it is in the OTHER L1
461 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
462 if (is_valid(L1Dcache_entry)) {
 463               // The block is in the wrong L1; replace it there before re-requesting the line
464 trigger(Event:L1_Replacement, in_msg.LineAddress,
465 L1Dcache_entry, TBEs[in_msg.LineAddress]);
466 }
467
468 if (L1Icache.cacheAvail(in_msg.LineAddress)) {
 469             // L1 doesn't have the line, but we have space for it
 470             // in the L1, so let's see if the L2 has it.
471 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
472 L1Icache_entry, TBEs[in_msg.LineAddress]);
473 } else {
474 // No room in the L1, so we need to make room in the L1
475 trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
476 getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
477 TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
478 }
479 }
480 } else {
481
482 // *** DATA ACCESS ***
483 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
484 if (is_valid(L1Dcache_entry)) {
 485             // The tag matches in the D-cache, so trigger the request for this block
486 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
487 L1Dcache_entry, TBEs[in_msg.LineAddress]);
488 } else {
489
490 // Check to see if it is in the OTHER L1
491 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
492 if (is_valid(L1Icache_entry)) {
 493               // The block is in the wrong L1; replace it there before re-requesting the line
494 trigger(Event:L1_Replacement, in_msg.LineAddress,
495 L1Icache_entry, TBEs[in_msg.LineAddress]);
496 }
497
498 if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
 499             // L1 doesn't have the line, but we have space for it
 500             // in the L1, so let's see if the L2 has it.
501 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
502 L1Dcache_entry, TBEs[in_msg.LineAddress]);
503 } else {
504 // No room in the L1, so we need to make room in the L1
505 trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
506 getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
507 TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
508 }
509 }
510 }
511 }
512 }
513 }
514
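  // Used by the prefetcher to place a prefetch request on the optional queue.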
515 void enqueuePrefetch(Addr address, RubyRequestType type) {
516 enqueue(optionalQueue_out, RubyRequest, 1) {
517 out_msg.LineAddress := address;
518 out_msg.Type := type;
519 out_msg.AccessMode := RubyAccessMode:Supervisor;
520 }
521 }
522
523 // ACTIONS
524 action(a_issueGETS, "a", desc="Issue GETS") {
525 peek(mandatoryQueue_in, RubyRequest) {
526 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
527 out_msg.addr := address;
528 out_msg.Type := CoherenceRequestType:GETS;
529 out_msg.Requestor := machineID;
530 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
531 l2_select_low_bit, l2_select_num_bits, intToID(0)));
532 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
533 address, out_msg.Destination);
534 out_msg.MessageSize := MessageSizeType:Control;
535 out_msg.Prefetch := in_msg.Prefetch;
536 out_msg.AccessMode := in_msg.AccessMode;
537 }
538 }
539 }
540
541 action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
542 peek(optionalQueue_in, RubyRequest) {
543 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
544 out_msg.addr := address;
545 out_msg.Type := CoherenceRequestType:GETS;
546 out_msg.Requestor := machineID;
547 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
548 l2_select_low_bit, l2_select_num_bits, intToID(0)));
549 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
550 address, out_msg.Destination);
551 out_msg.MessageSize := MessageSizeType:Control;
552 out_msg.Prefetch := in_msg.Prefetch;
553 out_msg.AccessMode := in_msg.AccessMode;
554 }
555 }
556 }
557
558 action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
559 peek(mandatoryQueue_in, RubyRequest) {
560 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
561 out_msg.addr := address;
562 out_msg.Type := CoherenceRequestType:GET_INSTR;
563 out_msg.Requestor := machineID;
564 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
565 l2_select_low_bit, l2_select_num_bits, intToID(0)));
566 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
567 address, out_msg.Destination);
568 out_msg.MessageSize := MessageSizeType:Control;
569 out_msg.Prefetch := in_msg.Prefetch;
570 out_msg.AccessMode := in_msg.AccessMode;
571 }
572 }
573 }
574
575 action(pai_issuePfGETINSTR, "pai",
576 desc="Issue GETINSTR for prefetch request") {
577 peek(optionalQueue_in, RubyRequest) {
578 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
579 out_msg.addr := address;
580 out_msg.Type := CoherenceRequestType:GET_INSTR;
581 out_msg.Requestor := machineID;
582 out_msg.Destination.add(
583 mapAddressToRange(address, MachineType:L2Cache,
584 l2_select_low_bit, l2_select_num_bits, intToID(0)));
585 out_msg.MessageSize := MessageSizeType:Control;
586 out_msg.Prefetch := in_msg.Prefetch;
587 out_msg.AccessMode := in_msg.AccessMode;
588
589 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
590 address, out_msg.Destination);
591 }
592 }
593 }
594
595 action(b_issueGETX, "b", desc="Issue GETX") {
596 peek(mandatoryQueue_in, RubyRequest) {
597 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
598 out_msg.addr := address;
599 out_msg.Type := CoherenceRequestType:GETX;
600 out_msg.Requestor := machineID;
601 DPRINTF(RubySlicc, "%s\n", machineID);
602 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
603 l2_select_low_bit, l2_select_num_bits, intToID(0)));
604 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
605 address, out_msg.Destination);
606 out_msg.MessageSize := MessageSizeType:Control;
607 out_msg.Prefetch := in_msg.Prefetch;
608 out_msg.AccessMode := in_msg.AccessMode;
609 }
610 }
611 }
612
613 action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
614 peek(optionalQueue_in, RubyRequest) {
615 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
616 out_msg.addr := address;
617 out_msg.Type := CoherenceRequestType:GETX;
618 out_msg.Requestor := machineID;
619 DPRINTF(RubySlicc, "%s\n", machineID);
620
621 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
622 l2_select_low_bit, l2_select_num_bits, intToID(0)));
623
624 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
625 address, out_msg.Destination);
626 out_msg.MessageSize := MessageSizeType:Control;
627 out_msg.Prefetch := in_msg.Prefetch;
628 out_msg.AccessMode := in_msg.AccessMode;
629 }
630 }
631 }
632
 633   action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
634 peek(mandatoryQueue_in, RubyRequest) {
635 enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
636 out_msg.addr := address;
637 out_msg.Type := CoherenceRequestType:UPGRADE;
638 out_msg.Requestor := machineID;
639 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
640 l2_select_low_bit, l2_select_num_bits, intToID(0)));
641 DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
642 address, out_msg.Destination);
643 out_msg.MessageSize := MessageSizeType:Control;
644 out_msg.Prefetch := in_msg.Prefetch;
645 out_msg.AccessMode := in_msg.AccessMode;
646 }
647 }
648 }
649
650 action(d_sendDataToRequestor, "d", desc="send data to requestor") {
651 peek(requestL1Network_in, RequestMsg) {
652 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
653 assert(is_valid(cache_entry));
654 out_msg.addr := address;
655 out_msg.Type := CoherenceResponseType:DATA;
656 out_msg.DataBlk := cache_entry.DataBlk;
657 out_msg.Dirty := cache_entry.Dirty;
658 out_msg.Sender := machineID;
659 out_msg.Destination.add(in_msg.Requestor);
660 out_msg.MessageSize := MessageSizeType:Response_Data;
661 }
662 }
663 }
664
665 action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
666 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
667 assert(is_valid(cache_entry));
668 out_msg.addr := address;
669 out_msg.Type := CoherenceResponseType:DATA;
670 out_msg.DataBlk := cache_entry.DataBlk;
671 out_msg.Dirty := cache_entry.Dirty;
672 out_msg.Sender := machineID;
673 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
674 l2_select_low_bit, l2_select_num_bits, intToID(0)));
675 out_msg.MessageSize := MessageSizeType:Response_Data;
676 }
677 }
678
679 action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
680 peek(requestL1Network_in, RequestMsg) {
681 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
682 assert(is_valid(tbe));
683 out_msg.addr := address;
684 out_msg.Type := CoherenceResponseType:DATA;
685 out_msg.DataBlk := tbe.DataBlk;
686 out_msg.Dirty := tbe.Dirty;
687 out_msg.Sender := machineID;
688 out_msg.Destination.add(in_msg.Requestor);
689 out_msg.MessageSize := MessageSizeType:Response_Data;
690 }
691 }
692 }
693
694 action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
695 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
696 assert(is_valid(tbe));
697 out_msg.addr := address;
698 out_msg.Type := CoherenceResponseType:DATA;
699 out_msg.DataBlk := tbe.DataBlk;
700 out_msg.Dirty := tbe.Dirty;
701 out_msg.Sender := machineID;
702 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
703 l2_select_low_bit, l2_select_num_bits, intToID(0)));
704 out_msg.MessageSize := MessageSizeType:Response_Data;
705 }
706 }
707
708 action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
709 peek(requestL1Network_in, RequestMsg) {
710 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
711 out_msg.addr := address;
712 out_msg.Type := CoherenceResponseType:ACK;
713 out_msg.Sender := machineID;
714 out_msg.Destination.add(in_msg.Requestor);
715 out_msg.MessageSize := MessageSizeType:Response_Control;
716 }
717 }
718 }
719
720 action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
721 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
722 assert(is_valid(cache_entry));
723 out_msg.addr := address;
724 out_msg.Type := CoherenceResponseType:DATA;
725 out_msg.DataBlk := cache_entry.DataBlk;
726 out_msg.Dirty := cache_entry.Dirty;
727 out_msg.Sender := machineID;
728 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
729 l2_select_low_bit, l2_select_num_bits, intToID(0)));
730 out_msg.MessageSize := MessageSizeType:Writeback_Data;
731 }
732 }
733
734 action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
735 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
736 assert(is_valid(tbe));
737 out_msg.addr := address;
738 out_msg.Type := CoherenceResponseType:DATA;
739 out_msg.DataBlk := tbe.DataBlk;
740 out_msg.Dirty := tbe.Dirty;
741 out_msg.Sender := machineID;
742 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
743 l2_select_low_bit, l2_select_num_bits, intToID(0)));
744 out_msg.MessageSize := MessageSizeType:Writeback_Data;
745 }
746 }
747
 748   action(fi_sendInvAck, "fi", desc="send invalidate ack to the requestor") {
749 peek(requestL1Network_in, RequestMsg) {
750 enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
751 out_msg.addr := address;
752 out_msg.Type := CoherenceResponseType:ACK;
753 out_msg.Sender := machineID;
754 out_msg.Destination.add(in_msg.Requestor);
755 out_msg.MessageSize := MessageSizeType:Response_Control;
756 out_msg.AckCount := 1;
757 }
758 }
759 }
760
761 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
762 if (send_evictions) {
763 DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
764 sequencer.evictionCallback(address);
765 }
766 }
767
 768   action(g_issuePUTX, "g", desc="issue PUTX writeback to the L2 cache") {
769 enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
770 assert(is_valid(cache_entry));
771 out_msg.addr := address;
772 out_msg.Type := CoherenceRequestType:PUTX;
773 out_msg.DataBlk := cache_entry.DataBlk;
774 out_msg.Dirty := cache_entry.Dirty;
 775       out_msg.Requestor := machineID;
776 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
777 l2_select_low_bit, l2_select_num_bits, intToID(0)));
778 if (cache_entry.Dirty) {
779 out_msg.MessageSize := MessageSizeType:Writeback_Data;
780 } else {
781 out_msg.MessageSize := MessageSizeType:Writeback_Control;
782 }
783 }
784 }
785
786 action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
787 enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
788 out_msg.addr := address;
789 out_msg.Type := CoherenceResponseType:UNBLOCK;
790 out_msg.Sender := machineID;
791 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
792 l2_select_low_bit, l2_select_num_bits, intToID(0)));
793 out_msg.MessageSize := MessageSizeType:Response_Control;
794 DPRINTF(RubySlicc, "%#x\n", address);
795 }
796 }
797
 798   action(jj_sendExclusiveUnblock, "\j", desc="send exclusive unblock to the L2 cache") {
799 enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
800 out_msg.addr := address;
801 out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
802 out_msg.Sender := machineID;
803 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
804 l2_select_low_bit, l2_select_num_bits, intToID(0)));
805 out_msg.MessageSize := MessageSizeType:Response_Control;
806 DPRINTF(RubySlicc, "%#x\n", address);
807
808 }
809 }
810
811 action(dg_invalidate_sc, "dg",
812 desc="Invalidate store conditional as the cache lost permissions") {
813 sequencer.invalidateSC(address);
814 }
815
816 action(h_load_hit, "hd",
817 desc="Notify sequencer the load completed.")
818 {
819 assert(is_valid(cache_entry));
820 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
821 L1Dcache.setMRU(cache_entry);
822 sequencer.readCallback(address, cache_entry.DataBlk);
823 }
824
825 action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
826 {
827 assert(is_valid(cache_entry));
828 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
829 L1Icache.setMRU(cache_entry);
830 sequencer.readCallback(address, cache_entry.DataBlk);
831 }
832
833 action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
834 {
835 assert(is_valid(cache_entry));
836 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
837 L1Icache.setMRU(address);
838 L1Dcache.setMRU(address);
839 sequencer.readCallback(address, cache_entry.DataBlk, true);
840 }
841
842 action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
843 {
844 assert(is_valid(cache_entry));
845 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
846 L1Dcache.setMRU(cache_entry);
847 sequencer.writeCallback(address, cache_entry.DataBlk);
848 cache_entry.Dirty := true;
849 }
850
851 action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
852 {
853 assert(is_valid(cache_entry));
854 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
855 L1Icache.setMRU(address);
856 L1Dcache.setMRU(address);
857 sequencer.writeCallback(address, cache_entry.DataBlk, true);
858 cache_entry.Dirty := true;
859 }
860
861 action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
862 check_allocate(TBEs);
863 assert(is_valid(cache_entry));
864 TBEs.allocate(address);
865 set_tbe(TBEs[address]);
866 tbe.isPrefetch := false;
867 tbe.Dirty := cache_entry.Dirty;
868 tbe.DataBlk := cache_entry.DataBlk;
869 }
870
871 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
872 mandatoryQueue_in.dequeue(clockEdge());
873 }
874
875 action(l_popRequestQueue, "l",
876 desc="Pop incoming request queue and profile the delay within this virtual network") {
877 Tick delay := requestL1Network_in.dequeue(clockEdge());
878 profileMsgDelay(2, ticksToCycles(delay));
879 }
880
881 action(o_popIncomingResponseQueue, "o",
882 desc="Pop Incoming Response queue and profile the delay within this virtual network") {
883 Tick delay := responseL1Network_in.dequeue(clockEdge());
884 profileMsgDelay(1, ticksToCycles(delay));
885 }
886
887 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
888 TBEs.deallocate(address);
889 unset_tbe();
890 }
891
892 action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
893 peek(responseL1Network_in, ResponseMsg) {
894 assert(is_valid(cache_entry));
895 cache_entry.DataBlk := in_msg.DataBlk;
896 cache_entry.Dirty := in_msg.Dirty;
897 }
898 }
899
900 action(q_updateAckCount, "q", desc="Update ack count") {
901 peek(responseL1Network_in, ResponseMsg) {
902 assert(is_valid(tbe));
903 tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
904 APPEND_TRANSITION_COMMENT(in_msg.AckCount);
905 APPEND_TRANSITION_COMMENT(" p: ");
906 APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
907 }
908 }
909
910 action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
911 if (L1Dcache.isTagPresent(address)) {
912 L1Dcache.deallocate(address);
913 } else {
914 L1Icache.deallocate(address);
915 }
916 unset_cache_entry();
917 }
918
919 action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
920 if (is_invalid(cache_entry)) {
921 set_cache_entry(L1Dcache.allocate(address, new Entry));
922 }
923 }
924
925 action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
926 if (is_invalid(cache_entry)) {
927 set_cache_entry(L1Icache.allocate(address, new Entry));
928 }
929 }
930
931 action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
932 stall_and_wait(mandatoryQueue_in, address);
933 }
934
935 action(z_stallAndWaitOptionalQueue, "\pz", desc="Stall and wait the L1 prefetch request queue") {
936 stall_and_wait(optionalQueue_in, address);
937 }
938
939 action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
940 wakeUpBuffers(address);
941 }
942
943 action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
944 ++L1Icache.demand_misses;
945 }
946
947 action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
948 ++L1Icache.demand_hits;
949 }
950
951 action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
952 ++L1Dcache.demand_misses;
953 }
954
955 action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
956 ++L1Dcache.demand_hits;
957 }
958
959 action(po_observeHit, "\ph", desc="Inform the prefetcher about the hit") {
960 peek(mandatoryQueue_in, RubyRequest) {
961 if (cache_entry.isPrefetch) {
962 prefetcher.observePfHit(in_msg.LineAddress);
963 cache_entry.isPrefetch := false;
964 }
965 }
966 }
967
968 action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
969 peek(mandatoryQueue_in, RubyRequest) {
970 if (enable_prefetch) {
971 prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
972 }
973 }
974 }
975
976 action(ppm_observePfMiss, "\ppm",
977 desc="Inform the prefetcher about the partial miss") {
978 peek(mandatoryQueue_in, RubyRequest) {
979 prefetcher.observePfMiss(in_msg.LineAddress);
980 }
981 }
982
983 action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
984 optionalQueue_in.dequeue(clockEdge());
985 }
986
987 action(mp_markPrefetched, "mp", desc="Set the isPrefetch flag") {
988 assert(is_valid(cache_entry));
989 cache_entry.isPrefetch := true;
990 }
991
992
993 //*****************************************************
994 // TRANSITIONS
995 //*****************************************************
996
997 // Transitions for Load/Store/Replacement/WriteBack from transient states
998 transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
999 z_stallAndWaitMandatoryQueue;
1000 }
1001
1002 transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
1003 z_stallAndWaitMandatoryQueue;
1004 }
1005
1006 transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
1007 z_stallAndWaitMandatoryQueue;
1008 }
1009
1010 transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, PF_IS, PF_IS_I, PF_IM, PF_SM}, PF_L1_Replacement) {
1011 z_stallAndWaitOptionalQueue;
1012 }
1013
1014 // Transitions from Idle
1015 transition({NP,I}, {L1_Replacement, PF_L1_Replacement}) {
1016 ff_deallocateL1CacheBlock;
1017 }
1018
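  // Prefetch requests for blocks that are already present or already in flight are simply dropped.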
1019 transition({S,E,M,IS,IM,SM,IS_I,PF_IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
1020 {PF_Load, PF_Store, PF_Ifetch}) {
1021 pq_popPrefetchQueue;
1022 }
1023
1024 transition({NP,I}, Load, IS) {
1025 oo_allocateL1DCacheBlock;
1026 i_allocateTBE;
1027 a_issueGETS;
1028 uu_profileDataMiss;
1029 po_observeMiss;
1030 k_popMandatoryQueue;
1031 }
1032
1033 transition({NP,I}, PF_Load, PF_IS) {
1034 oo_allocateL1DCacheBlock;
1035 i_allocateTBE;
1036 pa_issuePfGETS;
1037 pq_popPrefetchQueue;
1038 }
1039
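  // A demand access that finds an in-flight prefetch converts it into a demand miss.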
1040 transition(PF_IS, Load, IS) {
1041 uu_profileDataMiss;
1042 ppm_observePfMiss;
1043 k_popMandatoryQueue;
1044 }
1045
1046 transition(PF_IS_I, Load, IS_I) {
1047 uu_profileDataMiss;
1048 ppm_observePfMiss;
1049 k_popMandatoryQueue;
1050 }
1051
1052 transition(PF_IS_I, Ifetch, IS_I) {
1053 uu_profileInstMiss;
1054 ppm_observePfMiss;
1055 k_popMandatoryQueue;
1056 }
1057
1058 transition({NP,I}, Ifetch, IS) {
1059 pp_allocateL1ICacheBlock;
1060 i_allocateTBE;
1061 ai_issueGETINSTR;
1062 uu_profileInstMiss;
1063 po_observeMiss;
1064 k_popMandatoryQueue;
1065 }
1066
1067 transition({NP,I}, PF_Ifetch, PF_IS) {
1068 pp_allocateL1ICacheBlock;
1069 i_allocateTBE;
1070 pai_issuePfGETINSTR;
1071 pq_popPrefetchQueue;
1072 }
1073
 1074   // We proactively assume that the prefetch went into
 1075   // the instruction cache
1076 transition(PF_IS, Ifetch, IS) {
1077 uu_profileDataMiss;
1078 ppm_observePfMiss;
1079 k_popMandatoryQueue;
1080 }
1081
1082 transition({NP,I}, Store, IM) {
1083 oo_allocateL1DCacheBlock;
1084 i_allocateTBE;
1085 b_issueGETX;
1086 uu_profileDataMiss;
1087 po_observeMiss;
1088 k_popMandatoryQueue;
1089 }
1090
1091 transition({NP,I}, PF_Store, PF_IM) {
1092 oo_allocateL1DCacheBlock;
1093 i_allocateTBE;
1094 pb_issuePfGETX;
1095 pq_popPrefetchQueue;
1096 }
1097
1098 transition(PF_IM, Store, IM) {
1099 uu_profileDataMiss;
1100 ppm_observePfMiss;
1101 k_popMandatoryQueue;
1102 }
1103
1104 transition(PF_SM, Store, SM) {
1105 uu_profileDataMiss;
1106 ppm_observePfMiss;
1107 k_popMandatoryQueue;
1108 }
1109
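  // Invalidations for blocks that are not present are simply acknowledged.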
1110 transition({NP, I}, Inv) {
1111 fi_sendInvAck;
1112 l_popRequestQueue;
1113 }
1114
1115 // Transitions from Shared
1116 transition({S,E,M}, Load) {
1117 h_load_hit;
1118 uu_profileDataHit;
1119 po_observeHit;
1120 k_popMandatoryQueue;
1121 }
1122
1123 transition({S,E,M}, Ifetch) {
1124 h_ifetch_hit;
1125 uu_profileInstHit;
1126 po_observeHit;
1127 k_popMandatoryQueue;
1128 }
1129
1130 transition(S, Store, SM) {
1131 i_allocateTBE;
1132 c_issueUPGRADE;
1133 uu_profileDataMiss;
1134 k_popMandatoryQueue;
1135 }
1136
1137 transition(S, {L1_Replacement, PF_L1_Replacement}, I) {
1138 forward_eviction_to_cpu;
1139 ff_deallocateL1CacheBlock;
1140 }
1141
1142 transition(S, Inv, I) {
1143 forward_eviction_to_cpu;
1144 fi_sendInvAck;
1145 l_popRequestQueue;
1146 }
1147
1148 // Transitions from Exclusive
1149
1150 transition({E,M}, Store, M) {
1151 hh_store_hit;
1152 uu_profileDataHit;
1153 po_observeHit;
1154 k_popMandatoryQueue;
1155 }
1156
1157 transition(E, {L1_Replacement, PF_L1_Replacement}, M_I) {
1158 // silent E replacement??
1159 forward_eviction_to_cpu;
1160 i_allocateTBE;
1161 g_issuePUTX; // send data, but hold in case forwarded request
1162 ff_deallocateL1CacheBlock;
1163 }
1164
1165 transition(E, Inv, I) {
1166 // don't send data
1167 forward_eviction_to_cpu;
1168 fi_sendInvAck;
1169 l_popRequestQueue;
1170 }
1171
1172 transition(E, Fwd_GETX, I) {
1173 forward_eviction_to_cpu;
1174 d_sendDataToRequestor;
1175 l_popRequestQueue;
1176 }
1177
1178 transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
1179 d_sendDataToRequestor;
1180 d2_sendDataToL2;
1181 l_popRequestQueue;
1182 }
1183
1184 // Transitions from Modified
1185
1186 transition(M, {L1_Replacement, PF_L1_Replacement}, M_I) {
1187 forward_eviction_to_cpu;
1188 i_allocateTBE;
1189 g_issuePUTX; // send data, but hold in case forwarded request
1190 ff_deallocateL1CacheBlock;
1191 }
1192
1193 transition(M_I, WB_Ack, I) {
1194 s_deallocateTBE;
1195 o_popIncomingResponseQueue;
1196 kd_wakeUpDependents;
1197 }
1198
1199 transition(M, Inv, I) {
1200 forward_eviction_to_cpu;
1201 f_sendDataToL2;
1202 l_popRequestQueue;
1203 }
1204
1205 transition(M_I, Inv, SINK_WB_ACK) {
1206 ft_sendDataToL2_fromTBE;
1207 l_popRequestQueue;
1208 }
1209
1210 transition(M, Fwd_GETX, I) {
1211 forward_eviction_to_cpu;
1212 d_sendDataToRequestor;
1213 l_popRequestQueue;
1214 }
1215
1216 transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
1217 d_sendDataToRequestor;
1218 d2_sendDataToL2;
1219 l_popRequestQueue;
1220 }
1221
1222 transition(M_I, Fwd_GETX, SINK_WB_ACK) {
1223 dt_sendDataToRequestor_fromTBE;
1224 l_popRequestQueue;
1225 }
1226
1227 transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
1228 dt_sendDataToRequestor_fromTBE;
1229 d2t_sendDataToL2_fromTBE;
1230 l_popRequestQueue;
1231 }
1232
1233 // Transitions from IS
1234 transition({IS, IS_I}, Inv, IS_I) {
1235 fi_sendInvAck;
1236 l_popRequestQueue;
1237 }
1238
1239 transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
1240 fi_sendInvAck;
1241 l_popRequestQueue;
1242 }
1243
1244 transition(IS, Data_all_Acks, S) {
1245 u_writeDataToL1Cache;
1246 hx_load_hit;
1247 s_deallocateTBE;
1248 o_popIncomingResponseQueue;
1249 kd_wakeUpDependents;
1250 }
1251
1252 transition(PF_IS, Data_all_Acks, S) {
1253 u_writeDataToL1Cache;
1254 s_deallocateTBE;
1255 mp_markPrefetched;
1256 o_popIncomingResponseQueue;
1257 kd_wakeUpDependents;
1258 }
1259
1260 transition(IS_I, Data_all_Acks, I) {
1261 u_writeDataToL1Cache;
1262 hx_load_hit;
1263 s_deallocateTBE;
1264 o_popIncomingResponseQueue;
1265 kd_wakeUpDependents;
1266 }
1267
1268 transition(PF_IS_I, Data_all_Acks, I) {
1269 s_deallocateTBE;
1270 o_popIncomingResponseQueue;
1271 kd_wakeUpDependents;
1272 }
1273
1274 transition(IS, DataS_fromL1, S) {
1275 u_writeDataToL1Cache;
1276 j_sendUnblock;
1277 hx_load_hit;
1278 s_deallocateTBE;
1279 o_popIncomingResponseQueue;
1280 kd_wakeUpDependents;
1281 }
1282
1283 transition(PF_IS, DataS_fromL1, S) {
1284 u_writeDataToL1Cache;
1285 j_sendUnblock;
1286 s_deallocateTBE;
1287 o_popIncomingResponseQueue;
1288 kd_wakeUpDependents;
1289 }
1290
1291 transition(IS_I, DataS_fromL1, I) {
1292 u_writeDataToL1Cache;
1293 j_sendUnblock;
1294 hx_load_hit;
1295 s_deallocateTBE;
1296 o_popIncomingResponseQueue;
1297 kd_wakeUpDependents;
1298 }
1299
1300 transition(PF_IS_I, DataS_fromL1, I) {
1301 j_sendUnblock;
1302 s_deallocateTBE;
1303 o_popIncomingResponseQueue;
1304 kd_wakeUpDependents;
1305 }
1306
1307 // directory is blocked when sending exclusive data
1308 transition(IS_I, Data_Exclusive, E) {
1309 u_writeDataToL1Cache;
1310 hx_load_hit;
1311 jj_sendExclusiveUnblock;
1312 s_deallocateTBE;
1313 o_popIncomingResponseQueue;
1314 kd_wakeUpDependents;
1315 }
1316
1317 // directory is blocked when sending exclusive data
1318 transition(PF_IS_I, Data_Exclusive, E) {
1319 u_writeDataToL1Cache;
1320 jj_sendExclusiveUnblock;
1321 s_deallocateTBE;
1322 o_popIncomingResponseQueue;
1323 kd_wakeUpDependents;
1324 }
1325
1326 transition(IS, Data_Exclusive, E) {
1327 u_writeDataToL1Cache;
1328 hx_load_hit;
1329 jj_sendExclusiveUnblock;
1330 s_deallocateTBE;
1331 o_popIncomingResponseQueue;
1332 kd_wakeUpDependents;
1333 }
1334
1335 transition(PF_IS, Data_Exclusive, E) {
1336 u_writeDataToL1Cache;
1337 jj_sendExclusiveUnblock;
1338 s_deallocateTBE;
1339 mp_markPrefetched;
1340 o_popIncomingResponseQueue;
1341 kd_wakeUpDependents;
1342 }
1343
1344 // Transitions from IM
1345 transition(IM, Inv, IM) {
1346 fi_sendInvAck;
1347 l_popRequestQueue;
1348 }
1349
1350 transition({PF_IM, PF_SM}, Inv, PF_IM) {
1351 fi_sendInvAck;
1352 l_popRequestQueue;
1353 }
1354
1355 transition(IM, Data, SM) {
1356 u_writeDataToL1Cache;
1357 q_updateAckCount;
1358 o_popIncomingResponseQueue;
1359 }
1360
1361 transition(PF_IM, Data, PF_SM) {
1362 u_writeDataToL1Cache;
1363 q_updateAckCount;
1364 o_popIncomingResponseQueue;
1365 }
1366
1367 transition(IM, Data_all_Acks, M) {
1368 u_writeDataToL1Cache;
1369 hhx_store_hit;
1370 jj_sendExclusiveUnblock;
1371 s_deallocateTBE;
1372 o_popIncomingResponseQueue;
1373 kd_wakeUpDependents;
1374 }
1375
1376 transition(PF_IM, Data_all_Acks, M) {
1377 u_writeDataToL1Cache;
1378 jj_sendExclusiveUnblock;
1379 s_deallocateTBE;
1380 mp_markPrefetched;
1381 o_popIncomingResponseQueue;
1382 kd_wakeUpDependents;
1383 }
1384
 1385   // Transitions from SM
1386 transition(SM, Inv, IM) {
1387 forward_eviction_to_cpu;
1388 fi_sendInvAck;
1389 dg_invalidate_sc;
1390 l_popRequestQueue;
1391 }
1392
1393 transition({SM, IM, PF_SM, PF_IM}, Ack) {
1394 q_updateAckCount;
1395 o_popIncomingResponseQueue;
1396 }
1397
1398 transition(SM, Ack_all, M) {
1399 jj_sendExclusiveUnblock;
1400 hhx_store_hit;
1401 s_deallocateTBE;
1402 o_popIncomingResponseQueue;
1403 kd_wakeUpDependents;
1404 }
1405
1406 transition(PF_SM, Ack_all, M) {
1407 jj_sendExclusiveUnblock;
1408 s_deallocateTBE;
1409 mp_markPrefetched;
1410 o_popIncomingResponseQueue;
1411 kd_wakeUpDependents;
1412 }
1413
 1414   transition(SINK_WB_ACK, Inv) {
1415 fi_sendInvAck;
1416 l_popRequestQueue;
1417 }
1418
 1419   transition(SINK_WB_ACK, WB_Ack, I) {
1420 s_deallocateTBE;
1421 o_popIncomingResponseQueue;
1422 kd_wakeUpDependents;
1423 }
1424 }