3 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// L1 cache controller for a MOESI-style directory protocol (gem5 Ruby SLICC).
machine(L1Cache, "Directory protocol") {

  // Outgoing virtual networks: requests (vnet 0), responses (vnet 2),
  // unblock notifications to the directory (vnet 3). All unordered.
  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="false";
  MessageBuffer responseFromCache, network="To", virtual_network="2", ordered="false";
  MessageBuffer unblockFromCache, network="To", virtual_network="3", ordered="false";

  // Incoming: forwarded requests from the directory (vnet 1) and
  // responses from other caches/memory (vnet 2).
  MessageBuffer forwardToCache, network="From", virtual_network="1", ordered="false";
  MessageBuffer responseToCache, network="From", virtual_network="2", ordered="false";
  // Cache-line states. Stable MOESI states plus transient states used while
  // requests/writebacks are outstanding. Default is Invalid.
  // NOTE(review): the I/S/O members are referenced elsewhere (e.g. setState)
  // but their declarations are not visible in this view — presumably elided.
  enumeration(State, desc="Cache states", default="L1Cache_State_I") {
    NP, desc="Not Present";
    E, desc="Exclusive (clean)";
    M, desc="Modified (dirty)";
    MM, desc="Modified (dirty and locally modified)";
    // Transient states:
    IM, "IM", desc="Issued GetX";
    SM, "SM", desc="Issued GetX, we still have an old copy of the line";
    OM, "OM", desc="Issued GetX, received data";
    IS, "IS", desc="Issued GetS";
    OI, "OI", desc="Issued PutO, waiting for ack";
    MI, "MI", desc="Issued PutX, waiting for ack";
    II, "II", desc="Issued PutX/O, saw Fwd_GETS or Fwd_GETX, waiting for ack";
  // Events that drive the state machine: processor requests, internal
  // L1/L2 movement, forwarded coherence requests, and responses.
  enumeration(Event, desc="Cache events") {
    // From the processor (mandatory queue):
    Load, desc="Load request from the processor";
    Ifetch, desc="I-fetch request from the processor";
    Store, desc="Store request from the processor";
    // Internal hierarchy management (L1 and L2 are exclusive of each other):
    L2_Replacement, desc="Replacement";
    L1_to_L2, desc="L1 to L2 transfer";
    L2_to_L1D, desc="L2 to L1-Data transfer";
    L2_to_L1I, desc="L2 to L1-Instruction transfer";
    // Forwarded requests from the directory:
    Own_GETX, desc="We observe our own GetX forwarded back to us";
    Fwd_GETX, desc="A GetX from another processor";
    Fwd_GETS, desc="A GetS from another processor";
    Inv, desc="Invalidations from the directory";
    // Responses:
    Ack, desc="Received an ack message";
    Data, desc="Received a data message, responder has a shared copy";
    Exclusive_Data_Clean, desc="Received a data message, no other processor has it, data is clean";
    Exclusive_Data_Dirty, desc="Received a data message, no other processor has it, data is dirty";
    Writeback_Ack, desc="Writeback O.K. from directory";
    Writeback_Nack, desc="Writeback not O.K. from directory";
    // Triggered locally once all pending acks/data have arrived:
    All_acks, desc="Received all required data and message acks";
  // Per-line cache entry: coherence state, dirty bit, and the data block.
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="data for the block";
  // Transaction Buffer Entry: tracks an in-flight miss or writeback,
  // including a data copy so concurrent writebacks can proceed after the
  // cache entry is deallocated.
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int NumPendingMsgs, default="0", desc="Number of acks/data messages that this processor is waiting for";
  // Interface to the C++ CacheMemory implementation.
  external_type(CacheMemory) {
    bool cacheAvail(Address);          // is there a free way in this set?
    Address cacheProbe(Address);       // victim address if a replacement is needed
    void allocate(Address);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
  // Interface to the C++ TBE table implementation.
  external_type(TBETable) {
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  // Controller-owned objects: the processor-facing mandatory queue,
  // sequencer/store buffer hooks, the TBE table, and the three cache
  // arrays (split L1 I/D plus a unified L2, kept mutually exclusive).
  MessageBuffer mandatoryQueue, abstract_chip_ptr="true", ordered="false";
  Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
  StoreBuffer storeBuffer, abstract_chip_ptr="true", constructor_hack="i";

  TBETable TBEs, template_hack="<L1Cache_TBE>";
  CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
  CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
  CacheMemory L2cacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L2"', abstract_chip_ptr="true";
  // Return the entry for addr from whichever cache holds it, checking
  // L2 first, then L1D, then falling back to L1I. Caches are exclusive,
  // so at most one can hold the tag.
  Entry getCacheEntry(Address addr), return_by_ref="yes" {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory[addr];
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory[addr];
      return L1IcacheMemory[addr];
  // Forward a permission change to whichever cache holds addr
  // (same L2 -> L1D -> L1I search order as getCacheEntry).
  void changePermission(Address addr, AccessPermission permission) {
    if (L2cacheMemory.isTagPresent(addr)) {
      return L2cacheMemory.changePermission(addr, permission);
    } else if (L1DcacheMemory.isTagPresent(addr)) {
      return L1DcacheMemory.changePermission(addr, permission);
      return L1IcacheMemory.changePermission(addr, permission);
  // True if any of the three caches (L2, L1D, L1I) holds addr.
  bool isCacheTagPresent(Address addr) {
    return (L2cacheMemory.isTagPresent(addr) || L1DcacheMemory.isTagPresent(addr) || L1IcacheMemory.isTagPresent(addr));
  // Current state of addr: the TBE's transient state takes priority over
  // the cache entry's stable state. Asserts the caches stay mutually
  // exclusive (a tag may live in at most one of L1I/L1D/L2).
  State getState(Address addr) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if(TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    } else if (isCacheTagPresent(addr)) {
      return getCacheEntry(addr).CacheState;
  // Record the new state in the TBE (if any) and the cache entry (if any),
  // sanity-check the Dirty bit against the state, and derive the access
  // permission the sequencer sees from the coherence state.
  void setState(Address addr, State state) {
    assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
    assert((L1IcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);
    assert((L1DcacheMemory.isTagPresent(addr) && L2cacheMemory.isTagPresent(addr)) == false);

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;

    if (isCacheTagPresent(addr)) {
      getCacheEntry(addr).CacheState := state;

      // Exclusive-clean must not be dirty; Modified must be dirty.
      if (state == State:E) {
        assert(getCacheEntry(addr).Dirty == false);

      if ((state == State:M) || (state == State:MM)) {
        assert(getCacheEntry(addr).Dirty == true);

      // Only MM (locally modified) grants writes; other valid states are
      // read-only; everything else is invalid to the processor.
      if (state == State:MM) {
        changePermission(addr, AccessPermission:Read_Write);
      } else if ((state == State:S) ||
                 (state == State:O) ||
                 (state == State:M) ||
                 (state == State:E) ||
                 (state == State:SM) ||
                 (state == State:OM)) {
        changePermission(addr, AccessPermission:Read_Only);
        changePermission(addr, AccessPermission:Invalid);
  // Map a processor request type to the corresponding cache event.
  // NOTE(review): the return statements for each branch are not visible in
  // this view — presumably Load / Ifetch / Store respectively; confirm
  // against the full file.
  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
    } else if (type == CacheRequestType:IFETCH) {
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      error("Invalid CacheRequestType");
  // Local ordered queue used to self-trigger All_acks events.
  MessageBuffer triggerQueue, ordered="true";

  // Output ports onto the network buffers declared above.
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
  out_port(unblockNetwork_out, ResponseMsg, unblockFromCache);
  out_port(triggerQueue_out, TriggerMsg, triggerQueue);
  // Internal trigger port: converts ALL_ACKS trigger messages into
  // All_acks events for the address they name.
  in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
    if (triggerQueue_in.isReady()) {
      peek(triggerQueue_in, TriggerMsg) {
        if (in_msg.Type == TriggerType:ALL_ACKS) {
          trigger(Event:All_acks, in_msg.Address);
          error("Unexpected message");
246 // Nothing from the request network
  // Forwarded-request port: classify directory-forwarded requests into
  // events. A GETX forwarded back to its own requestor becomes Own_GETX.
  in_port(forwardToCache_in, RequestMsg, forwardToCache) {
    if (forwardToCache_in.isReady()) {
      peek(forwardToCache_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          if (in_msg.Requestor == machineID) {
            trigger(Event:Own_GETX, in_msg.Address);
            trigger(Event:Fwd_GETX, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          trigger(Event:Fwd_GETS, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
          error("Unexpected message");
  // Response port: acks and the three flavors of data (shared,
  // exclusive-clean, exclusive-dirty) become the matching events.
  in_port(responseToCache_in, ResponseMsg, responseToCache) {
    if (responseToCache_in.isReady()) {
      peek(responseToCache_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:Ack, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_CLEAN) {
          trigger(Event:Exclusive_Data_Clean, in_msg.Address);
        } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE_DIRTY) {
          trigger(Event:Exclusive_Data_Dirty, in_msg.Address);
          error("Unexpected message");
292 // Nothing from the unblock network
  // Processor request port. Enforces L1I/L1D/L2 exclusion before a request
  // is serviced: a block resident in the "wrong" L1 is first pushed to the
  // L2 (evicting an L2 victim if the L2 set is full); a block in the L2 is
  // pulled into the correct L1; only then does the demand event fire.
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {

        // Check for data access to blocks in I-cache and ifetchs to blocks in D-cache

        if (in_msg.Type == CacheRequestType:IFETCH) {
          // ** INSTRUCTION ACCESS ***

          // Check to see if it is in the OTHER L1
          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
              trigger(Event:L1_to_L2, in_msg.Address);
              // No room in L2: evict an L2 victim first.
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));

          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
            if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
              // L1 doesn't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:L2_to_L1I, in_msg.Address);
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1IcacheMemory.cacheProbe(in_msg.Address))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1IcacheMemory.cacheProbe(in_msg.Address));
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1IcacheMemory.cacheProbe(in_msg.Address)));

          // *** DATA ACCESS *** (mirror image of the instruction path)

          // Check to see if it is in the OTHER L1
          if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
            // The block is in the wrong L1, try to write it to the L2
            if (L2cacheMemory.cacheAvail(in_msg.Address)) {
              trigger(Event:L1_to_L2, in_msg.Address);
              // No room in L2: evict an L2 victim first.
              trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address));

          if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
            // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
            trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
            if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
              // L1 doesn't have the line, but we have space for it in the L1
              if (L2cacheMemory.isTagPresent(in_msg.Address)) {
                // L2 has it (maybe not with the right permissions)
                trigger(Event:L2_to_L1D, in_msg.Address);
                // We have room, the L2 doesn't have it, so the L1 fetches the line
                trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
              // No room in the L1, so we need to make room
              if (L2cacheMemory.cacheAvail(L1DcacheMemory.cacheProbe(in_msg.Address))) {
                // The L2 has room, so we move the line from the L1 to the L2
                trigger(Event:L1_to_L2, L1DcacheMemory.cacheProbe(in_msg.Address));
                // The L2 does not have room, so we replace a line from the L2
                trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(L1DcacheMemory.cacheProbe(in_msg.Address)));
  // Issue a GETS (read request) to the home directory.
  action(a_issueGETS, "a", desc="Issue GETS") {
    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETS;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      // TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
  // Issue a GETX (write/exclusive request) to the home directory.
  // NOTE(review): latency is ISSUE_LATENCY-1 here, unlike GETS — confirm
  // this asymmetry is intentional.
  action(b_issueGETX, "b", desc="Issue GETX") {
    enqueue(requestNetwork_out, RequestMsg, latency="(ISSUE_LATENCY-1)") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Request_Control;
      // TBEs[address].NumPendingMsgs := numberOfNodes(); // One from each other processor (n-1) plus the memory (+1)
  // Ask the directory for permission to write back a Modified line.
  action(d_issuePUTX, "d", desc="Issue PUTX") {
    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Ask the directory for permission to write back an Owned line.
  action(dd_issuePUTO, "\d", desc="Issue PUTO") {
    enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTO;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Respond to a forwarded request with our cached data (we keep a copy);
  // the Acks count from the forwarded request is passed through.
  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := in_msg.Acks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
  // Hand the line over exclusively (DATA_EXCLUSIVE_DIRTY) without
  // retaining a shared copy — used for migratory-style transfers.
  action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE_DIRTY;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := getCacheEntry(address).DataBlk;
        out_msg.Dirty := getCacheEntry(address).Dirty;
        out_msg.Acks := in_msg.Acks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
  // Send an invalidation ack to the requestor. SLICC has no unary minus,
  // hence the "0 - 1" spelling for an ack count of -1.
  action(f_sendAck, "f", desc="Send ack from cache to requestor") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.Acks := 0 - 1; // -1
        out_msg.MessageSize := MessageSizeType:Response_Control;
  // Tell the directory the transaction is complete so it can unblock.
  action(g_sendUnblock, "g", desc="Send unblock to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
  // Unblock the directory, noting we now hold the line exclusively.
  action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="NULL_LATENCY") {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Unblock_Control;
  // Complete a load: return the cached data to the sequencer.
  action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    sequencer.readCallback(address, getCacheEntry(address).DataBlk);
  // Complete a store: let the sequencer write into the block, then mark
  // the line dirty.
  action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
    DEBUG_EXPR(getCacheEntry(address).DataBlk);
    sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
    getCacheEntry(address).Dirty := true;
  // Allocate a TBE and snapshot the cache entry's data/dirty bit so a
  // writeback can proceed after the cache entry is gone.
  action(i_allocateTBE, "i", desc="Allocate TBE") {
    check_allocate(TBEs);
    TBEs.allocate(address);
    TBEs[address].DataBlk := getCacheEntry(address).DataBlk; // Data only used for writebacks
    TBEs[address].Dirty := getCacheEntry(address).Dirty;
  // Consume the head of the local trigger queue.
  action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
    triggerQueue_in.dequeue();
  // Consume the head of the processor's mandatory request queue.
  action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
    mandatoryQueue_in.dequeue();
513 action(l_popForwardQueue, "l", desc="Pop forwareded request queue.") {
514 forwardToCache_in.dequeue();
  // Subtract the response's Acks count from the pending-message counter.
  // (A negative Acks value, as sent by f_sendAck, increases the count.)
  action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
    peek(responseToCache_in, ResponseMsg) {
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
  // Same as m_decrementNumberOfMessages, but driven by a message on the
  // forwarded-request port (used for Own_GETX).
  action(mm_decrementNumberOfMessages, "\m", desc="Decrement the number of messages for which we're waiting") {
    peek(forwardToCache_in, RequestMsg) {
      TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
  // Consume the head of the response queue.
  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseToCache_in.dequeue();
  // If no messages remain outstanding, self-enqueue an ALL_ACKS trigger
  // so the All_acks event fires for this address.
  action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
    if (TBEs[address].NumPendingMsgs == 0) {
      enqueue(triggerQueue_out, TriggerMsg) {
        out_msg.Address := address;
        out_msg.Type := TriggerType:ALL_ACKS;
  // Satisfy a forwarded request using the TBE's data snapshot — used while
  // the line is mid-writeback and no longer in the cache arrays.
  action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
    peek(forwardToCache_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.Dirty := TBEs[address].Dirty;
        out_msg.Acks := in_msg.Acks;
        out_msg.MessageSize := MessageSizeType:Response_Data;
  // Complete a writeback: dirty lines go back as WRITEBACK_DIRTY with the
  // data; clean lines as WRITEBACK_CLEAN (data included only so memory can
  // verify it — a real system would omit it).
  action(qq_sendDataFromTBEToMemory, "\q", desc="Send data from TBE to memory") {
    enqueue(unblockNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
      out_msg.Address := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Dirty := TBEs[address].Dirty;
      if (TBEs[address].Dirty) {
        out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY;
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:WRITEBACK_CLEAN;
        // NOTE: in a real system this would not send data. We send
        // data here only so we can check it at the memory
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
  // Release the TBE once the transaction is fully complete.
  action(s_deallocateTBE, "s", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  // Install incoming response data (and its dirty bit) into the cache entry.
  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseToCache_in, ResponseMsg) {
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;
  // As u_writeDataToCache, but first assert the incoming data matches our
  // stale copy (used on the SM path, where we already held the line in S).
  action(v_writeDataToCacheVerify, "v", desc="Write data to cache, assert it was same as before") {
    peek(responseToCache_in, ResponseMsg) {
      assert(getCacheEntry(address).DataBlk == in_msg.DataBlk);
      getCacheEntry(address).DataBlk := in_msg.DataBlk;
      getCacheEntry(address).Dirty := in_msg.Dirty;
  // Free the L1 copy (D-side if present, otherwise I-side) so replacement
  // can overlap with a new fetch.
  action(kk_deallocateL1CacheBlock, "\k", desc="Deallocate cache block. Sets the cache to invalid, allowing a replacement in parallel with a fetch.") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory.deallocate(address);
      L1IcacheMemory.deallocate(address);
  // Allocate an L1D entry for the address if one is not already present.
  action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
    if (L1DcacheMemory.isTagPresent(address) == false) {
      L1DcacheMemory.allocate(address);
  // Allocate an L1I entry for the address if one is not already present.
  action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
    if (L1IcacheMemory.isTagPresent(address) == false) {
      L1IcacheMemory.allocate(address);
  // Allocate an L2 entry for the address (caller ensures space exists).
  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    L2cacheMemory.allocate(address);
  // Free the L2 entry so replacement can overlap with a new fetch.
  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cacheMemory.deallocate(address);
  // Copy the whole entry from whichever L1 holds it into the L2
  // (D-side takes priority, matching kk_deallocateL1CacheBlock).
  action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L2cacheMemory[address] := L1DcacheMemory[address];
      L2cacheMemory[address] := L1IcacheMemory[address];
  // Copy the entry from the L2 into whichever L1 has been allocated for it.
  action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
    if (L1DcacheMemory.isTagPresent(address)) {
      L1DcacheMemory[address] := L2cacheMemory[address];
      L1IcacheMemory[address] := L2cacheMemory[address];
  // Record this demand miss in the profiler.
  action(uu_profileMiss, "\u", desc="Profile the demand miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      profile_miss(in_msg, id);
  // Stall the head request by rotating it to the back of the mandatory
  // queue; it will be retried later (used in transient states).
  action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
    mandatoryQueue_in.recycle();
650 //*****************************************************
652 //*****************************************************
  // Transitions for Load/Store/L2_Replacement from transient states:
  // the request is recycled (stalled) until the outstanding transaction
  // resolves. Note SM/OM accept Load/Ifetch (line is readable there), so
  // they are absent from the second transition's state set.
  transition({IM, SM, OM, IS, OI, MI, II}, {Store, L2_Replacement}) {
    zz_recycleMandatoryQueue;

  transition({IM, IS, OI, MI, II}, {Load, Ifetch}) {
    zz_recycleMandatoryQueue;

  transition({IM, SM, OM, IS, OI, MI, II}, L1_to_L2) {
    zz_recycleMandatoryQueue;
  // Transitions moving data between the L1 and L2 caches, preserving
  // exclusion: allocate the destination, copy the entry, free the source.
  transition({I, S, O, E, M, MM}, L1_to_L2) {
    vv_allocateL2CacheBlock;
    ss_copyFromL1toL2; // Not really needed for state I
    kk_deallocateL1CacheBlock;

  transition({I, S, O, E, M, MM}, L2_to_L1D) {
    ii_allocateL1DCacheBlock;
    tt_copyFromL2toL1; // Not really needed for state I
    rr_deallocateL2CacheBlock;

  transition({I, S, O, E, M, MM}, L2_to_L1I) {
    jj_allocateL1ICacheBlock;
    tt_copyFromL2toL1; // Not really needed for state I
    rr_deallocateL2CacheBlock;
  // Transitions from Idle (NP/I): allocate the appropriate L1 entry and
  // go transient. Remaining actions of each transition (issue/profile/pop)
  // are elided in this view.
  transition({NP, I}, Load, IS) {
    ii_allocateL1DCacheBlock;

  transition({NP, I}, Ifetch, IS) {
    jj_allocateL1ICacheBlock;

  transition({NP, I}, Store, IM) {
    ii_allocateL1DCacheBlock;

  transition(I, L2_Replacement) {
    rr_deallocateL2CacheBlock;

  // Invalidation of a line we do not hold: nothing to do beyond the
  // (elided) ack/pop actions.
  transition({NP, I}, Inv) {
  // Transitions from Shared: reads hit; a store upgrades S->SM; replacement
  // and invalidation drop to I. (Hit/issue/ack actions elided in this view.)
  transition({S, SM}, {Load, Ifetch}) {

  transition(S, Store, SM) {

  transition(S, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;

  transition(S, Inv, I) {
  // Transitions from Owned: reads hit; a store upgrades O->OM; replacement
  // starts a PutO writeback (O->OI); Fwd_GETX surrenders the line, while
  // Fwd_GETS is serviced in place. (Some actions elided in this view.)
  transition({O, OM}, {Load, Ifetch}) {

  transition(O, Store, OM) {
    // p_decrementNumberOfMessagesByOne;

  transition(O, L2_Replacement, OI) {
    rr_deallocateL2CacheBlock;

  transition(O, Fwd_GETX, I) {

  transition(O, Fwd_GETS) {
  // Transitions from MM (locally modified): loads and stores hit;
  // replacement starts a PutX writeback (MM->MI); forwarded requests give
  // the line away — note Fwd_GETS also drops to I via an exclusive data
  // transfer (migratory-sharing optimization).
  transition(MM, {Load, Ifetch}) {

  transition(MM, Store) {

  transition(MM, L2_Replacement, MI) {
    rr_deallocateL2CacheBlock;

  transition(MM, Fwd_GETX, I) {

  transition(MM, Fwd_GETS, I) {
    ee_sendDataExclusive;
  // Transitions from Exclusive/Modified: reads hit; the first store moves
  // to MM; replacement starts a PutX writeback; Fwd_GETX surrenders the
  // line and Fwd_GETS demotes to Owned. (Some actions elided in this view.)
  transition({E, M}, {Load, Ifetch}) {

  transition({E, M}, Store, MM) {

  transition({E, M}, L2_Replacement, MI) {
    rr_deallocateL2CacheBlock;

  transition({E, M}, Fwd_GETX, I) {

  transition({E, M}, Fwd_GETS, O) {
  // Transitions from IM (GetX issued, no data yet): count acks as they
  // arrive; first data moves to OM, still awaiting remaining acks.
  transition(IM, Inv) {

  transition(IM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;

  transition(IM, Data, OM) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;
  // Transitions from SM (GetX issued while holding a stale S copy):
  // an invalidation discards the stale copy (SM->IM); data is verified
  // against the stale copy before moving to OM.
  transition(SM, Inv, IM) {

  transition(SM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;

  transition(SM, Data, OM) {
    v_writeDataToCacheVerify;
    m_decrementNumberOfMessages;
    o_checkForCompletion;
  // Transitions from OM (GetX issued, data in hand, acks pending):
  // our own forwarded GETX counts toward completion; once all acks arrive
  // (All_acks), finish the store and unblock exclusively into MM.
  transition(OM, Own_GETX) {
    mm_decrementNumberOfMessages;
    o_checkForCompletion;

  transition(OM, Fwd_GETX, IM) {

  transition(OM, Fwd_GETS, OM) {

  transition(OM, Ack) {
    m_decrementNumberOfMessages;
    o_checkForCompletion;

  transition(OM, All_acks, MM) {
    gg_sendUnblockExclusive;
  // Transitions from IS (GetS issued): shared data lands in S; exclusive
  // data lands in E (clean) or M (dirty) with an exclusive unblock.
  transition(IS, Inv) {

  transition(IS, Data, S) {
    m_decrementNumberOfMessages;

  transition(IS, Exclusive_Data_Clean, E) {
    m_decrementNumberOfMessages;
    gg_sendUnblockExclusive;

  transition(IS, Exclusive_Data_Dirty, M) {
    m_decrementNumberOfMessages;
    gg_sendUnblockExclusive;
  // Transitions from OI/MI (writeback in flight): forwarded requests are
  // served from the TBE snapshot; a Fwd_GETX moves to II since ownership
  // is gone. Writeback_Ack completes the writeback; a nacked PutX retries
  // as a PutO (MI->OI).
  transition(MI, Fwd_GETS) {
    q_sendDataFromTBEToCache;

  transition(MI, Fwd_GETX, II) {
    q_sendDataFromTBEToCache;

  transition(OI, Fwd_GETS) {
    q_sendDataFromTBEToCache;

  transition(OI, Fwd_GETX, II) {
    q_sendDataFromTBEToCache;

  transition({OI, MI}, Writeback_Ack, I) {
    qq_sendDataFromTBEToMemory;

  transition(MI, Writeback_Nack, OI) {
    // FIXME: This might cause deadlock by re-using the writeback
    // channel, we should handle this case differently.
  // Transitions from II (writeback raced with a forwarded request):
  // either directory reply resolves to I — the data already went to the
  // forwarded requestor. (Action lists elided in this view.)
  transition(II, Writeback_Ack, I) {

  transition(II, Writeback_Nack, I) {

  transition(II, Inv) {