src/mem/protocol/MESI_Three_Level-L0cache.sm
/*
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

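// This file implements the L0 controller of the MESI three-level protocol:
// the private instruction and data caches closest to the CPU. The controller
// receives processor requests on the mandatory queue and exchanges coherence
// messages with its companion L1 controller over bufferToL1/bufferFromL1.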
machine(MachineType:L0Cache, "MESI Directory L0 Cache")
 : Sequencer * sequencer;
   CacheMemory * Icache;
   CacheMemory * Dcache;
   Cycles request_latency := 2;
   Cycles response_latency := 2;
   bool send_evictions;

   // From this node's L0 cache to the network
   MessageBuffer * bufferToL1, network="To";

   // To this node's L0 cache FROM the network
   MessageBuffer * bufferFromL1, network="From";

   // Message queue between this controller and the processor
   MessageBuffer * mandatoryQueue;
{
  // STATES
  state_declaration(State, desc="Cache states", default="L0Cache_State_I") {
    // Base states

    // The cache entry has not been allocated.
    I, AccessPermission:Invalid;

    // The cache entry is in shared mode. The processor can read this entry
    // but it cannot write to it.
    S, AccessPermission:Read_Only;

    // The cache entry is in exclusive mode. The processor can read this
    // entry. It can write to this entry without informing the directory.
    // On writing, the entry moves to M state.
    E, AccessPermission:Read_Only;

    // The processor has read and write permissions on this entry.
    M, AccessPermission:Read_Write;

    // Transient States

    // The cache controller has requested an instruction. It will be stored
    // in the shared state so that the processor can read it.
    Inst_IS, AccessPermission:Busy;

    // The cache controller has requested that this entry be fetched in
    // shared state so that the processor can read it.
    IS, AccessPermission:Busy;

    // The cache controller has requested that this entry be fetched in
    // modified state so that the processor can read and write it.
    IM, AccessPermission:Busy;

    // The cache controller had read permission for this entry, but the
    // processor now needs to write to it, so the controller has requested
    // write permission.
    SM, AccessPermission:Read_Only;
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // L0 events
    Load,            desc="Load request from the home processor";
    Ifetch,          desc="I-fetch request from the home processor";
    Store,           desc="Store request from the home processor";

    Inv,             desc="Invalidate request from the L1 cache";

    // internally generated request
    L0_Replacement,  desc="L0 Replacement", format="!r";

    // other requests
    Fwd_GETX,        desc="GETX from other processor";
    Fwd_GETS,        desc="GETS from other processor";
    Fwd_GET_INSTR,   desc="GET_INSTR from other processor";

    Data,            desc="Data for processor";
    Data_Exclusive,  desc="Exclusive data for processor";
    Data_Stale,      desc="Data for processor, but not for storage";

    Ack,             desc="Ack for processor";
    Ack_all,         desc="Last ack for processor";

    WB_Ack,          desc="Ack for replacement";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
    State CacheState,              desc="cache state";
    DataBlock DataBlk,             desc="data for the block";
    bool Dirty, default="false",   desc="data is dirty";
  }

  // TBE fields
  structure(TBE, desc="...") {
    Addr addr,                     desc="Physical address for this TBE";
    State TBEState,                desc="Transient state";
    DataBlock DataBlk,             desc="Buffer for the data block";
    bool Dirty, default="false",   desc="data is dirty";
    int pendingAcks, default="0",  desc="number of pending acks";
  }

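  // The TBE table is implemented in C++ (external="yes"); it tracks the
  // transaction buffer entries for requests that are still in flight.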
  structure(TBETable, external="yes") {
    TBE lookup(Addr);
    void allocate(Addr);
    void deallocate(Addr);
    bool isPresent(Addr);
  }

  TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";

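  // Prototypes for functions that are supplied by the generated controller
  // and the Ruby runtime rather than defined in this file.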
  Tick clockEdge();
  Cycles ticksToCycles(Tick t);
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE a);
  void unset_tbe();
  void wakeUpBuffers(Addr a);
  void wakeUpAllBuffers(Addr a);
  void profileMsgDelay(int virtualNetworkType, Cycles c);

  // Look up the block in the L0 caches: the D-cache is checked first,
  // then the I-cache.
  Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    if(is_valid(Dcache_entry)) {
      return Dcache_entry;
    }

    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
    return Icache_entry;
  }

  Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
    return Dcache_entry;
  }

  Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
    Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
    return Icache_entry;
  }

  State getState(TBE tbe, Entry cache_entry, Addr addr) {
    assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);

    if(is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    }
    return State:I;
  }

  void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
    assert((Dcache.isTagPresent(addr) && Icache.isTagPresent(addr)) == false);

    // MUST CHANGE
    if(is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Addr addr) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
      return L0Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if(is_valid(cache_entry)) {
      DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(cache_entry.CacheState));
      return L0Cache_State_to_permission(cache_entry.CacheState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  void functionalRead(Addr addr, Packet *pkt) {
    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      testAndRead(addr, tbe.DataBlk, pkt);
    } else {
      testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
    }
  }

  int functionalWrite(Addr addr, Packet *pkt) {
    int num_functional_writes := 0;

    TBE tbe := TBEs[addr];
    if(is_valid(tbe)) {
      num_functional_writes := num_functional_writes +
        testAndWrite(addr, tbe.DataBlk, pkt);
      return num_functional_writes;
    }

    num_functional_writes := num_functional_writes +
      testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
    return num_functional_writes;
  }

  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L0Cache_State_to_permission(state));
    }
  }

  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  int getPendingAcks(TBE tbe) {
    return tbe.pendingAcks;
  }

  out_port(requestNetwork_out, CoherenceMsg, bufferToL1);

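  // The rank values below order how the generated controller polls its
  // in_ports: messages arriving from the L1 are intended to be serviced
  // before new processor requests on the mandatory queue (rank 0).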
  // Messages for this L0 cache from the L1 cache
  in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
    if (messgeBuffer_in.isReady(clockEdge())) {
      peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
        assert(in_msg.Dest == machineID);

        Entry cache_entry := getCacheEntry(in_msg.addr);
        TBE tbe := TBEs[in_msg.addr];

        if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
          trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
        } else if(in_msg.Class == CoherenceClass:DATA) {
          trigger(Event:Data, in_msg.addr, cache_entry, tbe);
        } else if(in_msg.Class == CoherenceClass:STALE_DATA) {
          trigger(Event:Data_Stale, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:ACK) {
          trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:WB_ACK) {
          trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:INV) {
          trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GETX ||
                   in_msg.Class == CoherenceClass:UPGRADE) {
          // upgrade transforms to GETX due to race
          trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GETS) {
          trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
        } else if (in_msg.Class == CoherenceClass:GET_INSTR) {
          trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
        } else {
          error("Invalid forwarded request type");
        }
      }
    }
  }

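  // A block may live in only one of the two L0 caches at a time (see the
  // assertion in getState/setState), so a request that finds its block in
  // the other cache first triggers an L0_Replacement to evict it from there.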
  // Mandatory queue between the node's CPU and its L0 caches
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
    if (mandatoryQueue_in.isReady(clockEdge())) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {

        // Check for data accesses to blocks in the I-cache and ifetches to
        // blocks in the D-cache
        if (in_msg.Type == RubyRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
          if (is_valid(Icache_entry)) {
            // The I-cache has the block, so handle the request with this entry.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    Icache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L0 cache
            Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
            if (is_valid(Dcache_entry)) {
              // The block is in the wrong L0 cache (the D-cache); evict it
              // before fetching the instruction.
              trigger(Event:L0_Replacement, in_msg.LineAddress,
                      Dcache_entry, TBEs[in_msg.LineAddress]);
            }

            if (Icache.cacheAvail(in_msg.LineAddress)) {
              // The L0 doesn't have the line, but there is space for it,
              // so ask the L1 for it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      Icache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L0, so make room by replacing a block
              trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
                      getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
                      TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        } else {

          // *** DATA ACCESS ***
          Entry Dcache_entry := getDCacheEntry(in_msg.LineAddress);
          if (is_valid(Dcache_entry)) {
            // The D-cache has the block, so handle the request with this entry.
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                    Dcache_entry, TBEs[in_msg.LineAddress]);
          } else {

            // Check to see if it is in the OTHER L0 cache
            Entry Icache_entry := getICacheEntry(in_msg.LineAddress);
            if (is_valid(Icache_entry)) {
              // The block is in the wrong L0 cache (the I-cache); evict it
              // before fetching the data.
              trigger(Event:L0_Replacement, in_msg.LineAddress,
                      Icache_entry, TBEs[in_msg.LineAddress]);
            }

            if (Dcache.cacheAvail(in_msg.LineAddress)) {
              // The L0 doesn't have the line, but there is space for it,
              // so ask the L1 for it
              trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                      Dcache_entry, TBEs[in_msg.LineAddress]);
            } else {
              // No room in the L0, so make room by replacing a block
              trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
                      getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
                      TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
            }
          }
        }
      }
    }
  }

  // ACTIONS
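  // Outgoing requests use createMachineID(MachineType:L1Cache, version),
  // i.e. they are addressed to the L1 controller that shares this
  // controller's version number (the L1 private to this core).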
  action(a_issueGETS, "a", desc="Issue GETS") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:GETS;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);
        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(b_issueGETX, "b", desc="Issue GETX") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:GETX;
        out_msg.Sender := machineID;
        DPRINTF(RubySlicc, "%s\n", machineID);
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
    peek(mandatoryQueue_in, RubyRequest) {
      enqueue(requestNetwork_out, CoherenceMsg, request_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:UPGRADE;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);

        DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
                address, out_msg.Dest);
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.AccessMode := in_msg.AccessMode;
      }
    }
  }

  action(f_sendDataToL1, "f", desc="Send data to the L1 cache") {
    enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:INV_DATA;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L1Cache, version);
      out_msg.MessageSize := MessageSizeType:Writeback_Data;
    }
    cache_entry.Dirty := false;
  }

  action(fi_sendInvAck, "fi", desc="Send an invalidation ack to the L1 cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
        out_msg.addr := address;
        out_msg.Class := CoherenceClass:INV_ACK;
        out_msg.Sender := machineID;
        out_msg.Dest := createMachineID(MachineType:L1Cache, version);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(g_issuePUTX, "g", desc="Send a PUTX writeback to the L1 cache") {
    enqueue(requestNetwork_out, CoherenceMsg, response_latency) {
      assert(is_valid(cache_entry));
      out_msg.addr := address;
      out_msg.Class := CoherenceClass:PUTX;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.Sender := machineID;
      out_msg.Dest := createMachineID(MachineType:L1Cache, version);

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.DataBlk := cache_entry.DataBlk;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
    }
  }

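  // The h_*/hh_* actions below complete accesses that hit directly in the
  // L0; the hx_*/hhx_* variants pass an extra 'true' to the sequencer
  // callback to mark the access as an external hit, i.e. one that completed
  // only after a miss was serviced by a lower level.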
  action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk);
  }

  action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Icache.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, true);
  }

  action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk);
    cache_entry.Dirty := true;
  }

  action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    Dcache.setMRU(cache_entry);
    sequencer.writeCallback(address, cache_entry.DataBlk, true);
    cache_entry.Dirty := true;
  }

  action(i_allocateTBE, "i", desc="Allocate TBE (number of invalidates=0)") {
    check_allocate(TBEs);
    assert(is_valid(cache_entry));
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
    tbe.Dirty := cache_entry.Dirty;
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue(clockEdge());
  }

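  // The two pop actions below drain the same incoming buffer from the L1;
  // they differ only in which virtual network's delay they charge to
  // profileMsgDelay.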
  action(l_popRequestQueue, "l",
         desc="Pop incoming request queue and profile the delay within this virtual network") {
    Tick delay := messgeBuffer_in.dequeue(clockEdge());
    profileMsgDelay(2, ticksToCycles(delay));
  }

  action(o_popIncomingResponseQueue, "o",
         desc="Pop Incoming Response queue and profile the delay within this virtual network") {
    Tick delay := messgeBuffer_in.dequeue(clockEdge());
    profileMsgDelay(1, ticksToCycles(delay));
  }

  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(u_writeInstToCache, "ui", desc="Write instruction data to cache") {
    peek(messgeBuffer_in, CoherenceMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(ff_deallocateCacheBlock, "\f",
         desc="Deallocate L0 cache block.") {
    if (Dcache.isTagPresent(address)) {
      Dcache.deallocate(address);
    } else {
      Icache.deallocate(address);
    }
    unset_cache_entry();
  }

  action(oo_allocateDCacheBlock, "\o", desc="Set L0 D-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(Dcache.allocate(address, new Entry));
    }
  }

  action(pp_allocateICacheBlock, "\p", desc="Set L0 I-cache tag equal to tag of block B.") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(Icache.allocate(address, new Entry));
    }
  }

  action(z_stallAndWaitMandatoryQueue, "\z", desc="recycle cpu request queue") {
    stall_and_wait(mandatoryQueue_in, address);
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpAllBuffers(address);
  }

  action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
    ++Icache.demand_misses;
  }

  action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
    ++Icache.demand_hits;
  }

  action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
    ++Dcache.demand_misses;
  }

  action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
    ++Dcache.demand_hits;
  }

  //*****************************************************
  // TRANSITIONS
  //*****************************************************
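  // Each transition names the current state(s), the triggering event(s) and,
  // optionally, the next state; the body lists the actions executed, in
  // order, when the transition fires.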

  // Stall Load/Ifetch/Store/L0_Replacement requests while in a transient state
  transition({Inst_IS, IS, IM, SM}, {Load, Ifetch, Store, L0_Replacement}) {
    z_stallAndWaitMandatoryQueue;
  }

  // Transitions from Idle
  transition(I, Load, IS) {
    oo_allocateDCacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition(I, Ifetch, Inst_IS) {
    pp_allocateICacheBlock;
    i_allocateTBE;
    a_issueGETS;
    uu_profileInstMiss;
    k_popMandatoryQueue;
  }

  transition(I, Store, IM) {
    oo_allocateDCacheBlock;
    i_allocateTBE;
    b_issueGETX;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

  transition({I, IS, IM, Inst_IS}, Inv) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

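  // An Inv received in SM means the upgrade lost a race with another writer:
  // ack the invalidation and fall back to IM so the store completes with
  // fresh data.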
  transition(SM, Inv, IM) {
    fi_sendInvAck;
    l_popRequestQueue;
  }

  // Transitions from Shared
  transition({S,E,M}, Load) {
    h_load_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition({S,E,M}, Ifetch) {
    h_ifetch_hit;
    uu_profileInstHit;
    k_popMandatoryQueue;
  }

  transition(S, Store, SM) {
    i_allocateTBE;
    c_issueUPGRADE;
    uu_profileDataMiss;
    k_popMandatoryQueue;
  }

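  // Clean shared blocks are evicted silently; no message is sent to the L1.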
  transition(S, L0_Replacement, I) {
    forward_eviction_to_cpu;
    ff_deallocateCacheBlock;
  }

  transition(S, Inv, I) {
    forward_eviction_to_cpu;
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  // Transitions from Exclusive
  transition({E,M}, Store, M) {
    hh_store_hit;
    uu_profileDataHit;
    k_popMandatoryQueue;
  }

  transition(E, L0_Replacement, I) {
    forward_eviction_to_cpu;
    g_issuePUTX;
    ff_deallocateCacheBlock;
  }

  transition(E, {Inv, Fwd_GETX}, I) {
    // don't send data
    forward_eviction_to_cpu;
    fi_sendInvAck;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    f_sendDataToL1;
    l_popRequestQueue;
  }

  // Transitions from Modified
  transition(M, L0_Replacement, I) {
    forward_eviction_to_cpu;
    g_issuePUTX;
    ff_deallocateCacheBlock;
  }

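  // Unlike E, a modified block must be written back to the L1 when it is
  // invalidated or fetched exclusively by another core.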
  transition(M, {Inv, Fwd_GETX}, I) {
    forward_eviction_to_cpu;
    f_sendDataToL1;
    ff_deallocateCacheBlock;
    l_popRequestQueue;
  }

  transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
    f_sendDataToL1;
    l_popRequestQueue;
  }

  transition(IS, Data, S) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(IS, Data_Exclusive, E) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

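  // Stale data satisfies the pending load or ifetch but must not be kept,
  // so the block is deallocated after the processor is notified.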
  transition(IS, Data_Stale, I) {
    u_writeDataToCache;
    hx_load_hit;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data, S) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data_Exclusive, E) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(Inst_IS, Data_Stale, I) {
    u_writeInstToCache;
    hx_ifetch_hit;
    s_deallocateTBE;
    ff_deallocateCacheBlock;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition({IM,SM}, Data_Exclusive, M) {
    u_writeDataToCache;
    hhx_store_hit;
    s_deallocateTBE;
    o_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }
}