mem-cache: Create an address aware TempCacheBlk
[gem5.git] / src / mem / protocol / MESI_Three_Level-L0cache.sm
/*
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(MachineType:L0Cache, "MESI Directory L0 Cache")
 : Sequencer * sequencer;
   CacheMemory * Icache;
   CacheMemory * Dcache;
   Cycles request_latency := 2;
   Cycles response_latency := 2;
   bool send_evictions;

   // From this node's L0 cache to the network
   MessageBuffer * bufferToL1, network="To";

   // To this node's L0 cache FROM the network
   MessageBuffer * bufferFromL1, network="From";

   // Message queue between this controller and the processor
   MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L0Cache_State_I") {
    // Base states

    // The cache entry has not been allocated.
    I, AccessPermission:Invalid;

    // The cache entry is in shared mode. The processor can read this entry
    // but it cannot write to it.
    S, AccessPermission:Read_Only;

    // The cache entry is in exclusive mode. The processor can read this
    // entry. It can write to this entry without informing the directory.
    // On writing, the entry moves to M state.
    E, AccessPermission:Read_Only;

    // The processor has read and write permissions on this entry.
    M, AccessPermission:Read_Write;

    // Transient States

    // The cache controller has requested an instruction. It will be stored
    // in the shared state so that the processor can read it.
    Inst_IS, AccessPermission:Busy;

    // The cache controller has requested that this entry be fetched in
    // shared state so that the processor can read it.
    IS, AccessPermission:Busy;

    // The cache controller has requested that this entry be fetched in
    // modified state so that the processor can read and write it.
    IM, AccessPermission:Busy;

    // The cache controller had read permission for this entry, but the
    // processor now needs to write to it, so the controller has requested
    // write permission.
    SM, AccessPermission:Read_Only;
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L0 events
    Load, desc="Load request from the home processor";
    Ifetch, desc="I-fetch request from the home processor";
    Store, desc="Store request from the home processor";

    Inv, desc="Invalidate request from the L1 cache";

    // internally generated request
    L0_Replacement, desc="L0 Replacement", format="!r";

    // other requests
    Fwd_GETX, desc="GETX from other processor";
    Fwd_GETS, desc="GETS from other processor";
    Fwd_GET_INSTR, desc="GET_INSTR from other processor";

    Data, desc="Data for processor";
    Data_Exclusive, desc="Exclusive data for processor";

    Ack, desc="Ack for processor";
    Ack_all, desc="Last ack for processor";

    WB_Ack, desc="Ack for replacement";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState, desc="cache state";
    DataBlock DataBlk, desc="data for the block";
    bool Dirty, default="false", desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr, desc="Physical address for this TBE";
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="Buffer for the data block";
    bool Dirty, default="false", desc="data is dirty";
    int pendingAcks, default="0", desc="number of pending acks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

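  // Table of transaction buffer entries (TBEs), one per outstanding request;
  // it is sized by the controller's m_number_of_TBEs parameter.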
  TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";

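  // Prototypes for helper functions provided by the Ruby runtime and the
  // generated controller code; declared here so the SLICC code below can
  // call them.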
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void wakeUpAllBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);

  // For this inclusive hierarchy, the lookup returns L0 entries only,
  // checking the D-cache first and then the I-cache.
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    if(is_valid(Dcache_entry)) {
      return Dcache_entry;
    }

    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
    return Icache_entry;
  }

  Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    return Dcache_entry;
  }

  Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
    return Icache_entry;
  }

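  // A block must never be resident in both L0 caches at once. When a TBE
  // exists, its transient state takes precedence over the cache entry's
  // state; blocks with neither are treated as Invalid.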
  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);

    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);

    // MUST CHANGE
    if(is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
      return L0Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(cache_entry.CacheState));
      return L0Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

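  // Functional accesses read or write the block's data directly, without
  // affecting timing, using the TBE copy if one exists and the cache entry
  // otherwise.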
  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
          testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
        testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L0Cache_State_to_permission(state));
    }
  }

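  // Map a request type coming from the sequencer to the corresponding
  // local cache event.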
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

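  // Port carrying this L0 cache's requests and responses to its L1 cache.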
  out_port(requestNetwork_out, CoherenceMsg, bufferToL1);

  // Messages for this L0 cache from the L1 cache
  in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
    if (messgeBuffer_in.isReady(clockEdge())) {
      peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
        assert(in_msg.Dest == machineID);

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if(in_msg.Class == CoherenceClass:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:ACK) {
          trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GETX ||
                   in_msg.Class == CoherenceClass:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

  // Mandatory queue between the node's CPU and its L0 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to
        // blocks in the D-cache

        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
          if (is_valid(Icache_entry)) {
            // The tag matches in the L0 I-cache, so trigger the request
            // locally.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    Icache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L0
            Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
            if (is_valid(Dcache_entry)) {
              // The block is in the wrong L0 cache, so replace it first
              trigger(Event:L0_Replacement, in_msg.LineAddress,
                      Dcache_entry, TBEs[in_msg.LineAddress]);
            }

            if (Icache.cacheAvail(in_msg.LineAddress)) {
              // The L0 doesn't have the line, but there is space for it,
              // so ask the L1 for it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      Icache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L0, so we need to evict a block to make room
              trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
                      getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
                      TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {

          // *** DATA ACCESS ***
          Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
          if (is_valid(Dcache_entry)) {
            // The tag matches in the L0 D-cache, so trigger the request
            // locally.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L0
            Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
            if (is_valid(Icache_entry)) {
              // The block is in the wrong L0 cache, so replace it first
              trigger(Event:L0_Replacement, in_msg.LineAddress,
                      Icache_entry, TBEs[in_msg.LineAddress]);
            }

            if (Dcache.cacheAvail(in_msg.LineAddress)) {
              // The L0 doesn't have the line, but there is space for it,
              // so ask the L1 for it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      Dcache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L0, so we need to evict a block to make room
              trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
                      getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
                      TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }

  // ACTIONS
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:GETS;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:GETX;
        out_msg.Sender := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:UPGRADE;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(f_sendDataToL1, "f", desc="Send data to the L1 cache") {
    enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:INV_DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L1Cache, version);
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
    cache_entry.Dirty := false;
  }

  action(fi_sendInvAck, "fi", desc="Send an invalidation ack to the L1 cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:INV_ACK;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(g_issuePUTX, "g", desc="Send PUTX (writeback) to the L1 cache") {
    enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:PUTX;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L1Cache, version);

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := cache_entry.DataBlk;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

  action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

  action(l_popRequestQueue, "l",
         desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := messgeBuffer_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popIncomingResponseQueue, "o",
         desc="Pop Incoming Response queue and profile the delay within this virtual network") {
    Tick delay := messgeBuffer_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(u_writeInstToCache, "ui", desc="Write instruction data to cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(ff_deallocateCacheBlock, "\f",
         desc="Deallocate L0 cache block.") {
    if (Dcache.isTagPresent(address)) {
      Dcache.deallocate(address);
    } else {
      Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateDCacheBlock, "\o", desc="Set L0 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(Dcache.allocate(address, new Entry));
    }
  }

  action(pp_allocateICacheBlock, "\p", desc="Set L0 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(Icache.allocate(address, new Entry));
    }
  }

  action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the CPU request queue") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpAllBuffers(address);
  }

  action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
    ++Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
    ++Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
    ++Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
    ++Dcache.demand_hits;
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************

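  // Each transition names the starting state(s), the triggering event(s),
  // an optional next state, and the ordered list of actions to execute.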
  // Transitions for Load/Store/Replacement/WriteBack from transient states
  transition({Inst_IS, IS, IM, SM}, {Load, Ifetch, Store, L0_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    oo_allocateDCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, Inst_IS) {
    pp_allocateICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileInstMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    oo_allocateDCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition({I, IS, IM, Inst_IS}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  transition(SM, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition({S,E,M}, Load) {
    h_load_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition({S,E,M}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(S, L0_Replacement, I) {
    forward_eviction_to_cpu;
    ff_deallocateCacheBlock;
  }

  transition(S, Inv, I) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  // Transitions from Exclusive
  transition({E,M}, Store, M) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(E, L0_Replacement, I) {
    forward_eviction_to_cpu;
    g_issuePUTX;
    ff_deallocateCacheBlock;
  }

  transition(E, {Inv, Fwd_GETX}, I) {
    // don't send data
    forward_eviction_to_cpu;
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    f_sendDataToL1;
    l_popRequestQueue;
  }

  // Transitions from Modified
  transition(M, L0_Replacement, I) {
    forward_eviction_to_cpu;
    g_issuePUTX;
    ff_deallocateCacheBlock;
  }

  transition(M, {Inv, Fwd_GETX}, I) {
    forward_eviction_to_cpu;
    f_sendDataToL1;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    f_sendDataToL1;
    l_popRequestQueue;
  }

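  // Transitions from transient states on receiving data from the L1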
  transition(IS, Data, S) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data, S) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data_Exclusive, E) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition({IM,SM}, Data_Exclusive, M) {
    u_writeDataToCache;
    hhx_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}