src/mem/protocol/MESI_CMP_directory-L1cache.sm
1
2 /*
3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 machine(L1Cache, "MESI Directory L1 Cache CMP")
31 : Sequencer * sequencer,
32 CacheMemory * L1IcacheMemory,
33 CacheMemory * L1DcacheMemory,
34 Prefetcher * prefetcher = 'NULL',
35 int l2_select_num_bits,
36 Cycles l1_request_latency = 2,
37 Cycles l1_response_latency = 2,
38 Cycles to_l2_latency = 1,
39 bool send_evictions,
40 bool enable_prefetch = "False"
41 {
42 // NODE L1 CACHE
43 // From this node's L1 cache TO the network
44 // a local L1 -> this L2 bank, currently ordered with directory forwarded requests
45 MessageBuffer requestFromL1Cache, network="To", virtual_network="0", ordered="false", vnet_type="request";
46 // a local L1 -> this L2 bank
47 MessageBuffer responseFromL1Cache, network="To", virtual_network="1", ordered="false", vnet_type="response";
48 MessageBuffer unblockFromL1Cache, network="To", virtual_network="2", ordered="false", vnet_type="unblock";
49
50
51 // To this node's L1 cache FROM the network
52 // a L2 bank -> this L1
53 MessageBuffer requestToL1Cache, network="From", virtual_network="0", ordered="false", vnet_type="request";
54 // a L2 bank -> this L1
55 MessageBuffer responseToL1Cache, network="From", virtual_network="1", ordered="false", vnet_type="response";
56 // Request Buffer for prefetches
57 MessageBuffer optionalQueue, ordered="false";
58
59
60 // STATES
61 state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
62 // Base states
63 NP, AccessPermission:Invalid, desc="Not present in either cache";
64 I, AccessPermission:Invalid, desc="a L1 cache entry Idle";
65 S, AccessPermission:Read_Only, desc="a L1 cache entry Shared";
66 E, AccessPermission:Read_Only, desc="a L1 cache entry Exclusive";
67 M, AccessPermission:Read_Write, desc="a L1 cache entry Modified", format="!b";
68
69 // Transient States
70 IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
71 IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
72 SM, AccessPermission:Read_Only, desc="L1 has block read-only, issued GETX/UPGRADE, waiting for remaining acks";
73 IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
74
75 M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
76 SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
77
78 // Transient States in which block is being prefetched
79 PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
80 PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
81 PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
82 PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
83 }
84
85 // EVENTS
86 enumeration(Event, desc="Cache events") {
87 // L1 events
88 Load, desc="Load request from the home processor";
89 Ifetch, desc="I-fetch request from the home processor";
90 Store, desc="Store request from the home processor";
91
92 Inv, desc="Invalidate request from L2 bank";
93
94 // internally generated request
95 L1_Replacement, desc="L1 Replacement", format="!r";
96
97 // other requests
98 Fwd_GETX, desc="GETX from other processor";
99 Fwd_GETS, desc="GETS from other processor";
100 Fwd_GET_INSTR, desc="GET_INSTR from other processor";
101
102 Data, desc="Data for processor";
103 Data_Exclusive, desc="Exclusive data for processor";
104 DataS_fromL1, desc="data for GETS request, need to unblock directory";
105 Data_all_Acks, desc="Data for processor, all acks";
106
107 Ack, desc="Ack for processor";
108 Ack_all, desc="Last ack for processor";
109
110 WB_Ack, desc="Ack for replacement";
111
112 PF_Load, desc="load request from prefetcher";
113 PF_Ifetch, desc="instruction fetch request from prefetcher";
114 PF_Store, desc="exclusive load request from prefetcher";
115 }
116
117 // TYPES
118
119 // CacheEntry
120 structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
121 State CacheState, desc="cache state";
122 DataBlock DataBlk, desc="data for the block";
123 bool Dirty, default="false", desc="data is dirty";
124 bool isPrefetch, desc="Set if this block was prefetched";
125 }
126
127 // TBE fields
128 structure(TBE, desc="...") {
129 Address Address, desc="Physical address for this TBE";
130 State TBEState, desc="Transient state";
131 DataBlock DataBlk, desc="Buffer for the data block";
132 bool Dirty, default="false", desc="data is dirty";
133 bool isPrefetch, desc="Set if this was caused by a prefetch";
134 int pendingAcks, default="0", desc="number of pending acks";
135 }
136
137 structure(TBETable, external="yes") {
138 TBE lookup(Address);
139 void allocate(Address);
140 void deallocate(Address);
141 bool isPresent(Address);
142 }
143
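    // One TBE tracks each outstanding miss or writeback; the table is sized by
    // the m_number_of_TBEs constructor argument below.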
144 TBETable L1_TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
145
146 MessageBuffer mandatoryQueue, ordered="false";
147
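    // The destination L2 bank is chosen by mapAddressToRange() from the address
    // bits [l2_select_low_bit, l2_select_low_bit + l2_select_num_bits), i.e. the
    // bits just above the block offset by default.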
148 int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
149
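    // These helpers are provided by the SLICC-generated controller code rather
    // than being defined in this file.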
150 void set_cache_entry(AbstractCacheEntry a);
151 void unset_cache_entry();
152 void set_tbe(TBE a);
153 void unset_tbe();
154 void wakeUpBuffers(Address a);
155
156 // inclusive cache returns L1 entries only
157 Entry getCacheEntry(Address addr), return_by_pointer="yes" {
158 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
159 if(is_valid(L1Dcache_entry)) {
160 return L1Dcache_entry;
161 }
162
163 Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
164 return L1Icache_entry;
165 }
166
167 Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
168 Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
169 return L1Dcache_entry;
170 }
171
172 Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
173 Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
174 return L1Icache_entry;
175 }
176
177 State getState(TBE tbe, Entry cache_entry, Address addr) {
178 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
179
180 if(is_valid(tbe)) {
181 return tbe.TBEState;
182 } else if (is_valid(cache_entry)) {
183 return cache_entry.CacheState;
184 }
185 return State:NP;
186 }
187
188 void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
189 assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
190
191 // MUST CHANGE
192 if(is_valid(tbe)) {
193 tbe.TBEState := state;
194 }
195
196 if (is_valid(cache_entry)) {
197 cache_entry.CacheState := state;
198 }
199 }
200
201 AccessPermission getAccessPermission(Address addr) {
202 TBE tbe := L1_TBEs[addr];
203 if(is_valid(tbe)) {
204 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
205 return L1Cache_State_to_permission(tbe.TBEState);
206 }
207
208 Entry cache_entry := getCacheEntry(addr);
209 if(is_valid(cache_entry)) {
210 DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
211 return L1Cache_State_to_permission(cache_entry.CacheState);
212 }
213
214 DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
215 return AccessPermission:NotPresent;
216 }
217
218 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
219 TBE tbe := L1_TBEs[addr];
220 if(is_valid(tbe)) {
221 return tbe.DataBlk;
222 }
223
224 return getCacheEntry(addr).DataBlk;
225 }
226
227 void setAccessPermission(Entry cache_entry, Address addr, State state) {
228 if (is_valid(cache_entry)) {
229 cache_entry.changePermission(L1Cache_State_to_permission(state));
230 }
231 }
232
233 Event mandatory_request_type_to_event(RubyRequestType type) {
234 if (type == RubyRequestType:LD) {
235 return Event:Load;
236 } else if (type == RubyRequestType:IFETCH) {
237 return Event:Ifetch;
238 } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
239 return Event:Store;
240 } else {
241 error("Invalid RubyRequestType");
242 }
243 }
244
245 Event prefetch_request_type_to_event(RubyRequestType type) {
246 if (type == RubyRequestType:LD) {
247 return Event:PF_Load;
248 } else if (type == RubyRequestType:IFETCH) {
249 return Event:PF_Ifetch;
250 } else if ((type == RubyRequestType:ST) ||
251 (type == RubyRequestType:ATOMIC)) {
252 return Event:PF_Store;
253 } else {
254 error("Invalid RubyRequestType");
255 }
256 }
257
258 int getPendingAcks(TBE tbe) {
259 return tbe.pendingAcks;
260 }
261
262 out_port(requestIntraChipL1Network_out, RequestMsg, requestFromL1Cache);
263 out_port(responseIntraChipL1Network_out, ResponseMsg, responseFromL1Cache);
264 out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
265 out_port(optionalQueue_out, RubyRequest, optionalQueue);
266
267
268 // Prefetch queue between the controller and the prefetcher
269 // As per Spracklen et al. (HPCA 2005), the prefetch queue should be
270 // implemented as a LIFO structure. The structure would allow for fast
271 // searches of all entries in the queue, not just the head msg. All
272 // msgs in the structure can be invalidated if a demand miss matches.
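    // Here the queue is modeled with an ordinary MessageBuffer (optionalQueue),
    // so only the head message is examined below; the LIFO/searchable behavior
    // described above is not what this port actually implements.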
273 in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
274 if (optionalQueue_in.isReady()) {
275 peek(optionalQueue_in, RubyRequest) {
276 // Instruction Prefetch
277 if (in_msg.Type == RubyRequestType:IFETCH) {
278 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
279 if (is_valid(L1Icache_entry)) {
280 // The block to be prefetched is already present in the
281 // cache. We should drop this request.
282 trigger(prefetch_request_type_to_event(in_msg.Type),
283 in_msg.LineAddress,
284 L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
285 }
286
287 // Check to see if it is in the OTHER L1
288 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
289 if (is_valid(L1Dcache_entry)) {
290 // The block is in the wrong L1 cache. We should drop
291 // this request.
292 trigger(prefetch_request_type_to_event(in_msg.Type),
293 in_msg.LineAddress,
294 L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
295 }
296
297 if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
298 // L1 doesn't have the line, but we have space for it
299 // in the L1, so let's see if the L2 has it
300 trigger(prefetch_request_type_to_event(in_msg.Type),
301 in_msg.LineAddress,
302 L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
303 } else {
304 // No room in the L1, so we need to make room in the L1
305 trigger(Event:L1_Replacement,
306 L1IcacheMemory.cacheProbe(in_msg.LineAddress),
307 getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
308 L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
309 }
310 } else {
311 // Data prefetch
312 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
313 if (is_valid(L1Dcache_entry)) {
314 // The block to be prefetched is already present in the
315 // cache. We should drop this request.
316 trigger(prefetch_request_type_to_event(in_msg.Type),
317 in_msg.LineAddress,
318 L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
319 }
320
321 // Check to see if it is in the OTHER L1
322 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
323 if (is_valid(L1Icache_entry)) {
324 // The block is in the wrong L1. Just drop the prefetch
325 // request.
326 trigger(prefetch_request_type_to_event(in_msg.Type),
327 in_msg.LineAddress,
328 L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
329 }
330
331 if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
332 // L1 doesn't have the line, but we have space for it in
333 // the L1, so let's see if the L2 has it
334 trigger(prefetch_request_type_to_event(in_msg.Type),
335 in_msg.LineAddress,
336 L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
337 } else {
338 // No room in the L1, so we need to make room in the L1
339 trigger(Event:L1_Replacement,
340 L1DcacheMemory.cacheProbe(in_msg.LineAddress),
341 getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
342 L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
343 }
344 }
345 }
346 }
347 }
348
349 // Response IntraChip L1 Network - response msg to this L1 cache
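    // Ack accounting (as implied by q_updateAckCount and fi_sendInvAck): each
    // invalidation ack carries AckCount = 1, while the data response is expected
    // to encode the number of acks still outstanding as a negative AckCount, so
    // (pendingAcks - AckCount) reaches zero exactly when every ack has arrived.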
350 in_port(responseIntraChipL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
351 if (responseIntraChipL1Network_in.isReady()) {
352 peek(responseIntraChipL1Network_in, ResponseMsg, block_on="Address") {
353 assert(in_msg.Destination.isElement(machineID));
354
355 Entry cache_entry := getCacheEntry(in_msg.Address);
356 TBE tbe := L1_TBEs[in_msg.Address];
357
358 if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
359 trigger(Event:Data_Exclusive, in_msg.Address, cache_entry, tbe);
360 } else if(in_msg.Type == CoherenceResponseType:DATA) {
361 if ((getState(tbe, cache_entry, in_msg.Address) == State:IS ||
362 getState(tbe, cache_entry, in_msg.Address) == State:IS_I ||
363 getState(tbe, cache_entry, in_msg.Address) == State:PF_IS ||
364 getState(tbe, cache_entry, in_msg.Address) == State:PF_IS_I) &&
365 machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
366
367 trigger(Event:DataS_fromL1, in_msg.Address, cache_entry, tbe);
368
369 } else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
370 trigger(Event:Data_all_Acks, in_msg.Address, cache_entry, tbe);
371 } else {
372 trigger(Event:Data, in_msg.Address, cache_entry, tbe);
373 }
374 } else if (in_msg.Type == CoherenceResponseType:ACK) {
375 if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
376 trigger(Event:Ack_all, in_msg.Address, cache_entry, tbe);
377 } else {
378 trigger(Event:Ack, in_msg.Address, cache_entry, tbe);
379 }
380 } else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
381 trigger(Event:WB_Ack, in_msg.Address, cache_entry, tbe);
382 } else {
383 error("Invalid L1 response type");
384 }
385 }
386 }
387 }
388
389 // Request IntraChip L1 Network - requests to this L1 cache, forwarded from the shared L2
390 in_port(requestIntraChipL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
391 if(requestIntraChipL1Network_in.isReady()) {
392 peek(requestIntraChipL1Network_in, RequestMsg, block_on="Address") {
393 assert(in_msg.Destination.isElement(machineID));
394
395 Entry cache_entry := getCacheEntry(in_msg.Address);
396 TBE tbe := L1_TBEs[in_msg.Address];
397
398 if (in_msg.Type == CoherenceRequestType:INV) {
399 trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
400 } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
401 // upgrade transforms to GETX due to race
402 trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
403 } else if (in_msg.Type == CoherenceRequestType:GETS) {
404 trigger(Event:Fwd_GETS, in_msg.Address, cache_entry, tbe);
405 } else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
406 trigger(Event:Fwd_GET_INSTR, in_msg.Address, cache_entry, tbe);
407 } else {
408 error("Invalid forwarded request type");
409 }
410 }
411 }
412 }
413
414 // Mandatory Queue between the node's CPU and its L1 caches
415 in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
416 if (mandatoryQueue_in.isReady()) {
417 peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
418
419 // Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
420
421 if (in_msg.Type == RubyRequestType:IFETCH) {
422 // ** INSTRUCTION ACCESS ***
423
424 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
425 if (is_valid(L1Icache_entry)) {
426 // The tag matches for the L1, so the L1 asks the L2 for it.
427 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
428 L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
429 } else {
430
431 // Check to see if it is in the OTHER L1
432 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
433 if (is_valid(L1Dcache_entry)) {
434 // The block is in the wrong L1; evict it so the request can be handled once the replacement completes
435 trigger(Event:L1_Replacement, in_msg.LineAddress,
436 L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
437 }
438
439 if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
440 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
441 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
442 L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
443 } else {
444 // No room in the L1, so we need to make room in the L1
445 trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress),
446 getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
447 L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
448 }
449 }
450 } else {
451
452 // *** DATA ACCESS ***
453 Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
454 if (is_valid(L1Dcache_entry)) {
455 // The tag matches for the L1, so the L1 asks the L2 for it
456 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
457 L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
458 } else {
459
460 // Check to see if it is in the OTHER L1
461 Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
462 if (is_valid(L1Icache_entry)) {
463 // The block is in the wrong L1; evict it so the request can be handled once the replacement completes
464 trigger(Event:L1_Replacement, in_msg.LineAddress,
465 L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
466 }
467
468 if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
469 // L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
470 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
471 L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
472 } else {
473 // No room in the L1, so we need to make room in the L1
474 trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress),
475 getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
476 L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
477 }
478 }
479 }
480 }
481 }
482 }
483
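    // Entry point the prefetcher uses (through the controller interface) to push
    // a prefetch request onto the optionalQueue.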
484 void enqueuePrefetch(Address address, RubyRequestType type) {
485 enqueue(optionalQueue_out, RubyRequest, latency=1) {
486 out_msg.LineAddress := address;
487 out_msg.Type := type;
488 out_msg.AccessMode := RubyAccessMode:Supervisor;
489 }
490 }
491
492 // ACTIONS
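    // The short string after each action name (e.g. "a", "\h") is a unique
    // shorthand SLICC uses to identify the action, for example in the generated
    // transition tables.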
493 action(a_issueGETS, "a", desc="Issue GETS") {
494 peek(mandatoryQueue_in, RubyRequest) {
495 enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
496 out_msg.Address := address;
497 out_msg.Type := CoherenceRequestType:GETS;
498 out_msg.Requestor := machineID;
499 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
500 l2_select_low_bit, l2_select_num_bits));
501 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
502 address, out_msg.Destination);
503 out_msg.MessageSize := MessageSizeType:Control;
504 out_msg.Prefetch := in_msg.Prefetch;
505 out_msg.AccessMode := in_msg.AccessMode;
506 }
507 }
508 }
509
510 action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
511 peek(optionalQueue_in, RubyRequest) {
512 enqueue(requestIntraChipL1Network_out, RequestMsg,
513 latency=l1_request_latency) {
514 out_msg.Address := address;
515 out_msg.Type := CoherenceRequestType:GETS;
516 out_msg.Requestor := machineID;
517 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
518 l2_select_low_bit, l2_select_num_bits));
519 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
520 address, out_msg.Destination);
521 out_msg.MessageSize := MessageSizeType:Control;
522 out_msg.Prefetch := in_msg.Prefetch;
523 out_msg.AccessMode := in_msg.AccessMode;
524 }
525 }
526 }
527
528 action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
529 peek(mandatoryQueue_in, RubyRequest) {
530 enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
531 out_msg.Address := address;
532 out_msg.Type := CoherenceRequestType:GET_INSTR;
533 out_msg.Requestor := machineID;
534 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
535 l2_select_low_bit, l2_select_num_bits));
536 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
537 address, out_msg.Destination);
538 out_msg.MessageSize := MessageSizeType:Control;
539 out_msg.Prefetch := in_msg.Prefetch;
540 out_msg.AccessMode := in_msg.AccessMode;
541 }
542 }
543 }
544
545 action(pai_issuePfGETINSTR, "pai",
546 desc="Issue GETINSTR for prefetch request") {
547 peek(optionalQueue_in, RubyRequest) {
548 enqueue(requestIntraChipL1Network_out, RequestMsg,
549 latency=l1_request_latency) {
550 out_msg.Address := address;
551 out_msg.Type := CoherenceRequestType:GET_INSTR;
552 out_msg.Requestor := machineID;
553 out_msg.Destination.add(
554 mapAddressToRange(address, MachineType:L2Cache,
555 l2_select_low_bit, l2_select_num_bits));
556 out_msg.MessageSize := MessageSizeType:Control;
557 out_msg.Prefetch := in_msg.Prefetch;
558 out_msg.AccessMode := in_msg.AccessMode;
559
560 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
561 address, out_msg.Destination);
562 }
563 }
564 }
565
566 action(b_issueGETX, "b", desc="Issue GETX") {
567 peek(mandatoryQueue_in, RubyRequest) {
568 enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
569 out_msg.Address := address;
570 out_msg.Type := CoherenceRequestType:GETX;
571 out_msg.Requestor := machineID;
572 DPRINTF(RubySlicc, "%s\n", machineID);
573 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
574 l2_select_low_bit, l2_select_num_bits));
575 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
576 address, out_msg.Destination);
577 out_msg.MessageSize := MessageSizeType:Control;
578 out_msg.Prefetch := in_msg.Prefetch;
579 out_msg.AccessMode := in_msg.AccessMode;
580 }
581 }
582 }
583
584 action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
585 peek(optionalQueue_in, RubyRequest) {
586 enqueue(requestIntraChipL1Network_out, RequestMsg,
587 latency=l1_request_latency) {
588 out_msg.Address := address;
589 out_msg.Type := CoherenceRequestType:GETX;
590 out_msg.Requestor := machineID;
591 DPRINTF(RubySlicc, "%s\n", machineID);
592
593 out_msg.Destination.add(mapAddressToRange(address,
594 MachineType:L2Cache,
595 l2_select_low_bit,
596 l2_select_num_bits));
597
598 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
599 address, out_msg.Destination);
600 out_msg.MessageSize := MessageSizeType:Control;
601 out_msg.Prefetch := in_msg.Prefetch;
602 out_msg.AccessMode := in_msg.AccessMode;
603 }
604 }
605 }
606
607 action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
608 peek(mandatoryQueue_in, RubyRequest) {
609 enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_request_latency) {
610 out_msg.Address := address;
611 out_msg.Type := CoherenceRequestType:UPGRADE;
612 out_msg.Requestor := machineID;
613 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
614 l2_select_low_bit, l2_select_num_bits));
615 DPRINTF(RubySlicc, "address: %s, destination: %s\n",
616 address, out_msg.Destination);
617 out_msg.MessageSize := MessageSizeType:Control;
618 out_msg.Prefetch := in_msg.Prefetch;
619 out_msg.AccessMode := in_msg.AccessMode;
620 }
621 }
622 }
623
624 action(d_sendDataToRequestor, "d", desc="send data to requestor") {
625 peek(requestIntraChipL1Network_in, RequestMsg) {
626 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
627 assert(is_valid(cache_entry));
628 out_msg.Address := address;
629 out_msg.Type := CoherenceResponseType:DATA;
630 out_msg.DataBlk := cache_entry.DataBlk;
631 out_msg.Dirty := cache_entry.Dirty;
632 out_msg.Sender := machineID;
633 out_msg.Destination.add(in_msg.Requestor);
634 out_msg.MessageSize := MessageSizeType:Response_Data;
635 }
636 }
637 }
638
639 action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
640 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
641 assert(is_valid(cache_entry));
642 out_msg.Address := address;
643 out_msg.Type := CoherenceResponseType:DATA;
644 out_msg.DataBlk := cache_entry.DataBlk;
645 out_msg.Dirty := cache_entry.Dirty;
646 out_msg.Sender := machineID;
647 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
648 l2_select_low_bit, l2_select_num_bits));
649 out_msg.MessageSize := MessageSizeType:Response_Data;
650 }
651 }
652
653 action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
654 peek(requestIntraChipL1Network_in, RequestMsg) {
655 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
656 assert(is_valid(tbe));
657 out_msg.Address := address;
658 out_msg.Type := CoherenceResponseType:DATA;
659 out_msg.DataBlk := tbe.DataBlk;
660 out_msg.Dirty := tbe.Dirty;
661 out_msg.Sender := machineID;
662 out_msg.Destination.add(in_msg.Requestor);
663 out_msg.MessageSize := MessageSizeType:Response_Data;
664 }
665 }
666 }
667
668 action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
669 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
670 assert(is_valid(tbe));
671 out_msg.Address := address;
672 out_msg.Type := CoherenceResponseType:DATA;
673 out_msg.DataBlk := tbe.DataBlk;
674 out_msg.Dirty := tbe.Dirty;
675 out_msg.Sender := machineID;
676 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
677 l2_select_low_bit, l2_select_num_bits));
678 out_msg.MessageSize := MessageSizeType:Response_Data;
679 }
680 }
681
682 action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
683 peek(requestIntraChipL1Network_in, RequestMsg) {
684 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
685 out_msg.Address := address;
686 out_msg.Type := CoherenceResponseType:ACK;
687 out_msg.Sender := machineID;
688 out_msg.Destination.add(in_msg.Requestor);
689 out_msg.MessageSize := MessageSizeType:Response_Control;
690 }
691 }
692 }
693
694 action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
695 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
696 assert(is_valid(cache_entry));
697 out_msg.Address := address;
698 out_msg.Type := CoherenceResponseType:DATA;
699 out_msg.DataBlk := cache_entry.DataBlk;
700 out_msg.Dirty := cache_entry.Dirty;
701 out_msg.Sender := machineID;
702 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
703 l2_select_low_bit, l2_select_num_bits));
704 out_msg.MessageSize := MessageSizeType:Writeback_Data;
705 }
706 }
707
708 action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
709 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
710 assert(is_valid(tbe));
711 out_msg.Address := address;
712 out_msg.Type := CoherenceResponseType:DATA;
713 out_msg.DataBlk := tbe.DataBlk;
714 out_msg.Dirty := tbe.Dirty;
715 out_msg.Sender := machineID;
716 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
717 l2_select_low_bit, l2_select_num_bits));
718 out_msg.MessageSize := MessageSizeType:Writeback_Data;
719 }
720 }
721
722 action(fi_sendInvAck, "fi", desc="send invalidate ack to the requestor") {
723 peek(requestIntraChipL1Network_in, RequestMsg) {
724 enqueue(responseIntraChipL1Network_out, ResponseMsg, latency=l1_response_latency) {
725 out_msg.Address := address;
726 out_msg.Type := CoherenceResponseType:ACK;
727 out_msg.Sender := machineID;
728 out_msg.Destination.add(in_msg.Requestor);
729 out_msg.MessageSize := MessageSizeType:Response_Control;
730 out_msg.AckCount := 1;
731 }
732 }
733 }
734
735 action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
736 if (send_evictions) {
737 DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
738 sequencer.evictionCallback(address);
739 }
740 }
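    // send_evictions is enabled when the CPU model needs to observe L1 evictions,
    // for example to keep LL/SC monitors or load-queue snooping coherent.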
741
742 action(g_issuePUTX, "g", desc="issue PUTX (writeback) to the L2 cache") {
743 enqueue(requestIntraChipL1Network_out, RequestMsg, latency=l1_response_latency) {
744 assert(is_valid(cache_entry));
745 out_msg.Address := address;
746 out_msg.Type := CoherenceRequestType:PUTX;
747 out_msg.DataBlk := cache_entry.DataBlk;
748 out_msg.Dirty := cache_entry.Dirty;
749 out_msg.Requestor:= machineID;
750 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
751 l2_select_low_bit, l2_select_num_bits));
752 if (cache_entry.Dirty) {
753 out_msg.MessageSize := MessageSizeType:Writeback_Data;
754 } else {
755 out_msg.MessageSize := MessageSizeType:Writeback_Control;
756 }
757 }
758 }
759
760 action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
761 enqueue(unblockNetwork_out, ResponseMsg, latency=to_l2_latency) {
762 out_msg.Address := address;
763 out_msg.Type := CoherenceResponseType:UNBLOCK;
764 out_msg.Sender := machineID;
765 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
766 l2_select_low_bit, l2_select_num_bits));
767 out_msg.MessageSize := MessageSizeType:Response_Control;
768 DPRINTF(RubySlicc, "%s\n", address);
769 }
770 }
771
772 action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
773 enqueue(unblockNetwork_out, ResponseMsg, latency=to_l2_latency) {
774 out_msg.Address := address;
775 out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
776 out_msg.Sender := machineID;
777 out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
778 l2_select_low_bit, l2_select_num_bits));
779 out_msg.MessageSize := MessageSizeType:Response_Control;
780 DPRINTF(RubySlicc, "%s\n", address);
781
782 }
783 }
784
785 action(dg_invalidate_sc, "dg",
786 desc="Invalidate store conditional as the cache lost permissions") {
787 sequencer.invalidateSC(address);
788 }
789
790 action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
791 assert(is_valid(cache_entry));
792 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
793 sequencer.readCallback(address, cache_entry.DataBlk);
794 }
795
796 action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
797 assert(is_valid(cache_entry));
798 DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
799 sequencer.writeCallback(address, cache_entry.DataBlk);
800 cache_entry.Dirty := true;
801 }
802
803 action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
804 check_allocate(L1_TBEs);
805 assert(is_valid(cache_entry));
806 L1_TBEs.allocate(address);
807 set_tbe(L1_TBEs[address]);
808 tbe.isPrefetch := false;
809 tbe.Dirty := cache_entry.Dirty;
810 tbe.DataBlk := cache_entry.DataBlk;
811 }
812
813 action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
814 mandatoryQueue_in.dequeue();
815 }
816
817 action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
818 profileMsgDelay(2, requestIntraChipL1Network_in.dequeue_getDelayCycles());
819 }
820
821 action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
822 profileMsgDelay(1, responseIntraChipL1Network_in.dequeue_getDelayCycles());
823 }
824
825 action(s_deallocateTBE, "s", desc="Deallocate TBE") {
826 L1_TBEs.deallocate(address);
827 unset_tbe();
828 }
829
830 action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
831 peek(responseIntraChipL1Network_in, ResponseMsg) {
832 assert(is_valid(cache_entry));
833 cache_entry.DataBlk := in_msg.DataBlk;
834 cache_entry.Dirty := in_msg.Dirty;
835 }
836 }
837
838 action(q_updateAckCount, "q", desc="Update ack count") {
839 peek(responseIntraChipL1Network_in, ResponseMsg) {
840 assert(is_valid(tbe));
841 tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
842 APPEND_TRANSITION_COMMENT(in_msg.AckCount);
843 APPEND_TRANSITION_COMMENT(" p: ");
844 APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
845 }
846 }
847
848 action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
849 if (L1DcacheMemory.isTagPresent(address)) {
850 L1DcacheMemory.deallocate(address);
851 } else {
852 L1IcacheMemory.deallocate(address);
853 }
854 unset_cache_entry();
855 }
856
857 action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
858 if (is_invalid(cache_entry)) {
859 set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
860 }
861 }
862
863 action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
864 if (is_invalid(cache_entry)) {
865 set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
866 }
867 }
868
869 action(z_stallAndWaitMandatoryQueue, "\z", desc="recycle L1 request queue") {
870 stall_and_wait(mandatoryQueue_in, address);
871 }
872
873 action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
874 wakeUpBuffers(address);
875 }
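    // stall_and_wait (in z_stallAndWaitMandatoryQueue) parks a request on a
    // per-address buffer; wakeUpBuffers re-schedules those stalled requests once
    // the transient state for that address resolves.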
876
877 action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
878 peek(mandatoryQueue_in, RubyRequest) {
879 L1IcacheMemory.profileMiss(in_msg);
880 }
881 }
882
883 action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
884 peek(mandatoryQueue_in, RubyRequest) {
885 L1DcacheMemory.profileMiss(in_msg);
886 }
887 }
888
889 action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
890 peek(mandatoryQueue_in, RubyRequest) {
891 if (enable_prefetch) {
892 prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
893 }
894 }
895 }
896
897 action(ppm_observePfMiss, "\ppm",
898 desc="Inform the prefetcher about the partial miss") {
899 peek(mandatoryQueue_in, RubyRequest) {
900 prefetcher.observePfMiss(in_msg.LineAddress);
901 }
902 }
903
904 action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
905 optionalQueue_in.dequeue();
906 }
907
908 action(mp_markPrefetched, "mp", desc="Mark the cache block as prefetched") {
909 assert(is_valid(cache_entry));
910 cache_entry.isPrefetch := true;
911 }
912
913
914 //*****************************************************
915 // TRANSITIONS
916 //*****************************************************
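    // Each transition lists the current state(s), the triggering event(s), an
    // optional next state, and the actions executed in order when it fires.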
917
918 // Transitions for Load/Store/Replacement/WriteBack from transient states
919 transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
920 z_stallAndWaitMandatoryQueue;
921 }
922
923 transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
924 z_stallAndWaitMandatoryQueue;
925 }
926
927 transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
928 z_stallAndWaitMandatoryQueue;
929 }
930
931 // Transitions from Idle
932 transition({NP,I}, L1_Replacement) {
933 ff_deallocateL1CacheBlock;
934 }
935
936 transition({S,E,M,IS,IM,SM,IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
937 {PF_Load, PF_Store}) {
938 pq_popPrefetchQueue;
939 }
940
941 transition({NP,I}, Load, IS) {
942 oo_allocateL1DCacheBlock;
943 i_allocateTBE;
944 a_issueGETS;
945 uu_profileDataMiss;
946 po_observeMiss;
947 k_popMandatoryQueue;
948 }
949
950 transition({NP,I}, PF_Load, PF_IS) {
951 oo_allocateL1DCacheBlock;
952 i_allocateTBE;
953 pa_issuePfGETS;
954 pq_popPrefetchQueue;
955 }
956
957 transition(PF_IS, Load, IS) {
958 uu_profileDataMiss;
959 ppm_observePfMiss;
960 k_popMandatoryQueue;
961 }
962
963 transition(PF_IS_I, Load, IS_I) {
964 uu_profileDataMiss;
965 ppm_observePfMiss;
966 k_popMandatoryQueue;
967 }
968
969 transition({NP,I}, Ifetch, IS) {
970 pp_allocateL1ICacheBlock;
971 i_allocateTBE;
972 ai_issueGETINSTR;
973 uu_profileInstMiss;
974 po_observeMiss;
975 k_popMandatoryQueue;
976 }
977
978 transition({NP,I}, PF_Ifetch, PF_IS) {
979 pp_allocateL1ICacheBlock;
980 i_allocateTBE;
981 pai_issuePfGETINSTR;
982 pq_popPrefetchQueue;
983 }
984
985 // We proactively assume that the prefetch was for
986 // the instruction cache
987 transition(PF_IS, Ifetch, IS) {
988 uu_profileDataMiss;
989 ppm_observePfMiss;
990 k_popMandatoryQueue;
991 }
992
993 transition({NP,I}, Store, IM) {
994 oo_allocateL1DCacheBlock;
995 i_allocateTBE;
996 b_issueGETX;
997 uu_profileDataMiss;
998 po_observeMiss;
999 k_popMandatoryQueue;
1000 }
1001
1002 transition({NP,I}, PF_Store, PF_IM) {
1003 oo_allocateL1DCacheBlock;
1004 i_allocateTBE;
1005 pb_issuePfGETX;
1006 pq_popPrefetchQueue;
1007 }
1008
1009 transition(PF_IM, Store, IM) {
1010 uu_profileDataMiss;
1011 ppm_observePfMiss;
1012 k_popMandatoryQueue;
1013 }
1014
1015 transition(PF_SM, Store, SM) {
1016 uu_profileDataMiss;
1017 ppm_observePfMiss;
1018 k_popMandatoryQueue;
1019 }
1020
1021 transition({NP, I}, Inv) {
1022 fi_sendInvAck;
1023 l_popRequestQueue;
1024 }
1025
1026 // Transitions from Shared
1027 transition(S, {Load,Ifetch}) {
1028 h_load_hit;
1029 k_popMandatoryQueue;
1030 }
1031
1032 transition(S, Store, SM) {
1033 i_allocateTBE;
1034 c_issueUPGRADE;
1035 uu_profileDataMiss;
1036 k_popMandatoryQueue;
1037 }
1038
1039 transition(S, L1_Replacement, I) {
1040 forward_eviction_to_cpu;
1041 ff_deallocateL1CacheBlock;
1042 }
1043
1044 transition(S, Inv, I) {
1045 forward_eviction_to_cpu;
1046 fi_sendInvAck;
1047 l_popRequestQueue;
1048 }
1049
1050 // Transitions from Exclusive
1051
1052 transition(E, {Load, Ifetch}) {
1053 h_load_hit;
1054 k_popMandatoryQueue;
1055 }
1056
1057 transition(E, Store, M) {
1058 hh_store_hit;
1059 k_popMandatoryQueue;
1060 }
1061
1062 transition(E, L1_Replacement, M_I) {
1063 // silent E replacement??
1064 forward_eviction_to_cpu;
1065 i_allocateTBE;
1066 g_issuePUTX; // send data, but hold in case forwarded request
1067 ff_deallocateL1CacheBlock;
1068 }
1069
1070 transition(E, Inv, I) {
1071 // don't send data
1072 forward_eviction_to_cpu;
1073 fi_sendInvAck;
1074 l_popRequestQueue;
1075 }
1076
1077 transition(E, Fwd_GETX, I) {
1078 forward_eviction_to_cpu;
1079 d_sendDataToRequestor;
1080 l_popRequestQueue;
1081 }
1082
1083 transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
1084 d_sendDataToRequestor;
1085 d2_sendDataToL2;
1086 l_popRequestQueue;
1087 }
1088
1089 // Transitions from Modified
1090 transition(M, {Load, Ifetch}) {
1091 h_load_hit;
1092 k_popMandatoryQueue;
1093 }
1094
1095 transition(M, Store) {
1096 hh_store_hit;
1097 k_popMandatoryQueue;
1098 }
1099
1100 transition(M, L1_Replacement, M_I) {
1101 forward_eviction_to_cpu;
1102 i_allocateTBE;
1103 g_issuePUTX; // send data, but hold in case forwarded request
1104 ff_deallocateL1CacheBlock;
1105 }
1106
1107 transition(M_I, WB_Ack, I) {
1108 s_deallocateTBE;
1109 o_popIncomingResponseQueue;
1110 kd_wakeUpDependents;
1111 }
1112
1113 transition(M, Inv, I) {
1114 forward_eviction_to_cpu;
1115 f_sendDataToL2;
1116 l_popRequestQueue;
1117 }
1118
1119 transition(M_I, Inv, SINK_WB_ACK) {
1120 ft_sendDataToL2_fromTBE;
1121 l_popRequestQueue;
1122 }
1123
1124 transition(M, Fwd_GETX, I) {
1125 forward_eviction_to_cpu;
1126 d_sendDataToRequestor;
1127 l_popRequestQueue;
1128 }
1129
1130 transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
1131 d_sendDataToRequestor;
1132 d2_sendDataToL2;
1133 l_popRequestQueue;
1134 }
1135
1136 transition(M_I, Fwd_GETX, SINK_WB_ACK) {
1137 dt_sendDataToRequestor_fromTBE;
1138 l_popRequestQueue;
1139 }
1140
1141 transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
1142 dt_sendDataToRequestor_fromTBE;
1143 d2t_sendDataToL2_fromTBE;
1144 l_popRequestQueue;
1145 }
1146
1147 // Transitions from IS
1148 transition({IS, IS_I}, Inv, IS_I) {
1149 fi_sendInvAck;
1150 l_popRequestQueue;
1151 }
1152
1153 transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
1154 fi_sendInvAck;
1155 l_popRequestQueue;
1156 }
1157
1158 transition(IS, Data_all_Acks, S) {
1159 u_writeDataToL1Cache;
1160 h_load_hit;
1161 s_deallocateTBE;
1162 o_popIncomingResponseQueue;
1163 kd_wakeUpDependents;
1164 }
1165
1166 transition(PF_IS, Data_all_Acks, S) {
1167 u_writeDataToL1Cache;
1168 s_deallocateTBE;
1169 mp_markPrefetched;
1170 o_popIncomingResponseQueue;
1171 kd_wakeUpDependents;
1172 }
1173
1174 transition(IS_I, Data_all_Acks, I) {
1175 u_writeDataToL1Cache;
1176 h_load_hit;
1177 s_deallocateTBE;
1178 o_popIncomingResponseQueue;
1179 kd_wakeUpDependents;
1180 }
1181
1182 transition(PF_IS_I, Data_all_Acks, I) {
1183 s_deallocateTBE;
1184 o_popIncomingResponseQueue;
1185 kd_wakeUpDependents;
1186 }
1187
1188 transition(IS, DataS_fromL1, S) {
1189 u_writeDataToL1Cache;
1190 j_sendUnblock;
1191 h_load_hit;
1192 s_deallocateTBE;
1193 o_popIncomingResponseQueue;
1194 kd_wakeUpDependents;
1195 }
1196
1197 transition(PF_IS, DataS_fromL1, S) {
1198 u_writeDataToL1Cache;
1199 j_sendUnblock;
1200 s_deallocateTBE;
1201 o_popIncomingResponseQueue;
1202 kd_wakeUpDependents;
1203 }
1204
1205 transition(IS_I, DataS_fromL1, I) {
1206 u_writeDataToL1Cache;
1207 j_sendUnblock;
1208 h_load_hit;
1209 s_deallocateTBE;
1210 o_popIncomingResponseQueue;
1211 kd_wakeUpDependents;
1212 }
1213
1214 transition(PF_IS_I, DataS_fromL1, I) {
1215 j_sendUnblock;
1216 s_deallocateTBE;
1217 o_popIncomingResponseQueue;
1218 kd_wakeUpDependents;
1219 }
1220
1221 // directory is blocked when sending exclusive data
1222 transition(IS_I, Data_Exclusive, E) {
1223 u_writeDataToL1Cache;
1224 h_load_hit;
1225 jj_sendExclusiveUnblock;
1226 s_deallocateTBE;
1227 o_popIncomingResponseQueue;
1228 kd_wakeUpDependents;
1229 }
1230
1231 // directory is blocked when sending exclusive data
1232 transition(PF_IS_I, Data_Exclusive, E) {
1233 u_writeDataToL1Cache;
1234 jj_sendExclusiveUnblock;
1235 s_deallocateTBE;
1236 o_popIncomingResponseQueue;
1237 kd_wakeUpDependents;
1238 }
1239
1240 transition(IS, Data_Exclusive, E) {
1241 u_writeDataToL1Cache;
1242 h_load_hit;
1243 jj_sendExclusiveUnblock;
1244 s_deallocateTBE;
1245 o_popIncomingResponseQueue;
1246 kd_wakeUpDependents;
1247 }
1248
1249 transition(PF_IS, Data_Exclusive, E) {
1250 u_writeDataToL1Cache;
1251 jj_sendExclusiveUnblock;
1252 s_deallocateTBE;
1253 mp_markPrefetched;
1254 o_popIncomingResponseQueue;
1255 kd_wakeUpDependents;
1256 }
1257
1258 // Transitions from IM
1259 transition(IM, Inv, IM) {
1260 fi_sendInvAck;
1261 l_popRequestQueue;
1262 }
1263
1264 transition({PF_IM, PF_SM}, Inv, PF_IM) {
1265 fi_sendInvAck;
1266 l_popRequestQueue;
1267 }
1268
1269 transition(IM, Data, SM) {
1270 u_writeDataToL1Cache;
1271 q_updateAckCount;
1272 o_popIncomingResponseQueue;
1273 }
1274
1275 transition(PF_IM, Data, PF_SM) {
1276 u_writeDataToL1Cache;
1277 q_updateAckCount;
1278 o_popIncomingResponseQueue;
1279 }
1280
1281 transition(IM, Data_all_Acks, M) {
1282 u_writeDataToL1Cache;
1283 hh_store_hit;
1284 jj_sendExclusiveUnblock;
1285 s_deallocateTBE;
1286 o_popIncomingResponseQueue;
1287 kd_wakeUpDependents;
1288 }
1289
1290 transition(PF_IM, Data_all_Acks, M) {
1291 u_writeDataToL1Cache;
1292 jj_sendExclusiveUnblock;
1293 s_deallocateTBE;
1294 mp_markPrefetched;
1295 o_popIncomingResponseQueue;
1296 kd_wakeUpDependents;
1297 }
1298
1299 // Transitions from SM
1300 transition(SM, Inv, IM) {
1301 fi_sendInvAck;
1302 dg_invalidate_sc;
1303 l_popRequestQueue;
1304 }
1305
1306 transition({SM, IM, PF_SM, PF_IM}, Ack) {
1307 q_updateAckCount;
1308 o_popIncomingResponseQueue;
1309 }
1310
1311 transition(SM, Ack_all, M) {
1312 jj_sendExclusiveUnblock;
1313 hh_store_hit;
1314 s_deallocateTBE;
1315 o_popIncomingResponseQueue;
1316 kd_wakeUpDependents;
1317 }
1318
1319 transition(PF_SM, Ack_all, M) {
1320 jj_sendExclusiveUnblock;
1321 s_deallocateTBE;
1322 mp_markPrefetched;
1323 o_popIncomingResponseQueue;
1324 kd_wakeUpDependents;
1325 }
1326
1327 transition(SINK_WB_ACK, Inv) {
1328 fi_sendInvAck;
1329 l_popRequestQueue;
1330 }
1331
1332 transition(SINK_WB_ACK, WB_Ack, I) {
1333 s_deallocateTBE;
1334 o_popIncomingResponseQueue;
1335 kd_wakeUpDependents;
1336 }
1337 }