// MI-example L1 cache controller (SLICC): a cache line is either Modified
// or Invalid, with transient states covering in-flight fills and writebacks.
2 machine(L1Cache, "MI Example L1 Cache")
// cache_response_latency: cycles charged when this cache sources data
// (used by the e_sendData / ee_sendDataFromTBE actions below).
3 : int cache_response_latency,
// Outbound buffers: requests on virtual network 0, responses on vnet 1.
8 MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
9 MessageBuffer responseFromCache, network="To", virtual_network="1", ordered="true";
// Inbound buffers: forwarded requests/acks on vnet 2, data responses on vnet 1.
11 MessageBuffer forwardToCache, network="From", virtual_network="2", ordered="true";
12 MessageBuffer responseToCache, network="From", virtual_network="1", ordered="true";
15 enumeration(State, desc="Cache states") {
// Stable state: line is not present in this cache.
16 I, desc="Not Present/Invalid";
// Transient states: a writeback (PUT) or a fill request is outstanding.
17 II, desc="Not Present/Invalid, issued PUT";
19 MI, desc="Modified, issued PUT";
20 MII, desc="Modified, issued PUTX, received nack";
22 IS, desc="Issued request for LOAD/IFETCH";
23 IM, desc="Issued request for STORE/ATOMIC";
27 enumeration(Event, desc="Cache events") {
// Processor-side requests arriving on the mandatory queue.
30 Load, desc="Load request from processor";
31 Ifetch, desc="Ifetch request from processor";
32 Store, desc="Store request from processor";
// Network-side events: fills, forwarded requests, and invalidations.
34 Data, desc="Data from network";
35 Fwd_GETX, desc="Forward from network";
37 Inv, desc="Invalidate request from dir";
// Internal event: evict a resident line to make room for a new allocation.
39 Replacement, desc="Replace a block";
// Directory responses to an issued writeback (PUT).
40 Writeback_Ack, desc="Ack from the directory for a writeback";
41 Writeback_Nack, desc="Nack from the directory for a writeback";
44 // STRUCTURE DEFINITIONS
// Processor requests enter through the (unordered) mandatory queue; the
// sequencer is notified via read/write callbacks when accesses complete.
46 MessageBuffer mandatoryQueue, ordered="false";
47 Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
// Per-line cache entry: coherence state, dirty flag, and the data block.
50 structure(Entry, desc="...", interface="AbstractCacheEntry") {
51 State CacheState, desc="cache state";
52 bool Dirty, desc="Is the data dirty (different than memory)?";
53 DataBlock DataBlk, desc="Data in the block";
// Interface to the C++ cache array implementation.
57 external_type(CacheMemory) {
// Whether the line can be allocated without an eviction (see mandatoryQueue_in).
58 bool cacheAvail(Address);
// Victim address that would be evicted to make room for this line's set.
59 Address cacheProbe(Address);
60 void allocate(Address, Entry);
61 void deallocate(Address);
62 Entry lookup(Address);
63 void changePermission(Address, AccessPermission);
64 bool isTagPresent(Address);
65 void profileMiss(CacheMsg);
// Transaction Buffer Entry: per-address bookkeeping for an in-flight
// transaction; DataBlk lets a writeback outlive the deallocated cache block.
69 structure(TBE, desc="...") {
70 State TBEState, desc="Transient state";
71 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
// Interface to the C++ table of TBEs, keyed by line address.
74 external_type(TBETable) {
76 void allocate(Address);
77 void deallocate(Address);
78 bool isPresent(Address);
// Controller storage: the cache data/tag array and the in-flight TBE table.
84 CacheMemory cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
86 TBETable TBEs, template_hack="<L1Cache_TBE>";
// Map a processor CacheRequestType from the mandatory queue to the protocol
// Event that triggers its transition (LD, IFETCH, and ST/ATOMIC branches);
// any other request type is a protocol error.
91 Event mandatory_request_type_to_event(CacheRequestType type) {
92 if (type == CacheRequestType:LD) {
94 } else if (type == CacheRequestType:IFETCH) {
96 } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
99 error("Invalid CacheRequestType");
// Current protocol state for addr.  A TBE, if present, holds the
// authoritative (transient) state; otherwise the resident cache entry's
// state is used.
104 State getState(Address addr) {
106 if (TBEs.isPresent(addr)) {
107 return TBEs[addr].TBEState;
109 else if (cacheMemory.isTagPresent(addr)) {
110 return cacheMemory[addr].CacheState;
// Record a new protocol state for addr, mirroring it into the TBE (if any)
// and the cache entry (if resident), and derive the access permission:
// Read_Write in M, Invalid in every other state.
117 void setState(Address addr, State state) {
119 if (TBEs.isPresent(addr)) {
120 TBEs[addr].TBEState := state;
123 if (cacheMemory.isTagPresent(addr)) {
124 cacheMemory[addr].CacheState := state;
125 if (state == State:M) {
126 cacheMemory.changePermission(addr, AccessPermission:Read_Write);
128 cacheMemory.changePermission(addr, AccessPermission:Invalid);
// Outbound ports onto the request and response virtual networks.
136 out_port(requestNetwork_out, RequestMsg, requestFromCache);
137 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
// Forwarded requests and writeback acks/nacks from the directory.  Each
// message type is converted into the matching protocol event for its
// address; an unrecognized type is a protocol error.
139 in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
140 if (forwardRequestNetwork_in.isReady()) {
141 peek(forwardRequestNetwork_in, RequestMsg) {
142 if (in_msg.Type == CoherenceRequestType:GETX) {
143 trigger(Event:Fwd_GETX, in_msg.Address);
145 else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
146 trigger(Event:Writeback_Ack, in_msg.Address);
148 else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
149 trigger(Event:Writeback_Nack, in_msg.Address);
151 else if (in_msg.Type == CoherenceRequestType:INV) {
152 trigger(Event:Inv, in_msg.Address);
155 error("Unexpected message");
// Data responses from the directory/memory.  Only DATA messages are
// expected on this network; anything else is a protocol error.
161 in_port(responseNetwork_in, ResponseMsg, responseToCache) {
162 if (responseNetwork_in.isReady()) {
163 peek(responseNetwork_in, ResponseMsg) {
164 if (in_msg.Type == CoherenceResponseType:DATA) {
165 trigger(Event:Data, in_msg.Address);
168 error("Unexpected message");
// Processor requests.  If the target line is not resident and no way is
// free in its set, first trigger a Replacement on the victim chosen by
// cacheProbe(); otherwise trigger the event matching the request type.
175 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
176 if (mandatoryQueue_in.isReady()) {
177 peek(mandatoryQueue_in, CacheMsg) {
180 if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
181 cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
182 // make room for the block
183 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
186 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
// Issue a GETX to this address's home directory (control-sized message).
194 action(a_issueRequest, "a", desc="Issue a request") {
195 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
196 out_msg.Address := address;
197 out_msg.Type := CoherenceRequestType:GETX;
198 out_msg.Requestor := machineID;
199 out_msg.Destination.add(map_Address_to_Directory(address));
200 out_msg.MessageSize := MessageSizeType:Control;
// Issue a PUTX writeback to the directory, carrying the cached data block.
204 action(b_issuePUT, "b", desc="Issue a PUT request") {
205 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
206 out_msg.Address := address;
207 out_msg.Type := CoherenceRequestType:PUTX;
208 out_msg.Requestor := machineID;
209 out_msg.Destination.add(map_Address_to_Directory(address));
210 out_msg.DataBlk := cacheMemory[address].DataBlk;
211 out_msg.MessageSize := MessageSizeType:Data;
// Source the line from the cache array to the requestor named in the
// forwarded request currently at the head of the forward queue.
216 action(e_sendData, "e", desc="Send data from cache to requestor") {
217 peek(forwardRequestNetwork_in, RequestMsg) {
218 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
219 out_msg.Address := address;
220 out_msg.Type := CoherenceResponseType:DATA;
221 out_msg.Sender := machineID;
222 out_msg.Destination.add(in_msg.Requestor);
223 out_msg.DataBlk := cacheMemory[address].DataBlk;
224 out_msg.MessageSize := MessageSizeType:Response_Data;
// Same as e_sendData, but source the line from the TBE — used when the
// cache block has already been given up (e.g. during a writeback).
229 action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
230 peek(forwardRequestNetwork_in, RequestMsg) {
231 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
232 out_msg.Address := address;
233 out_msg.Type := CoherenceResponseType:DATA;
234 out_msg.Sender := machineID;
235 out_msg.Destination.add(in_msg.Requestor);
236 out_msg.DataBlk := TBEs[address].DataBlk;
237 out_msg.MessageSize := MessageSizeType:Response_Data;
// Allocate a fresh cache entry for this address (no-op if already present).
243 action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
244 if (cacheMemory.isTagPresent(address) == false) {
245 cacheMemory.allocate(address, new Entry);
// Free the cache entry for this address (no-op if not present).
249 action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
250 if (cacheMemory.isTagPresent(address) == true) {
251 cacheMemory.deallocate(address);
// Consume the processor request at the head of the mandatory queue.
255 action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
256 mandatoryQueue_in.dequeue();
// Consume the head response message, recording its network delay (bucket 1).
259 action(n_popResponseQueue, "n", desc="Pop the response queue") {
260 profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
// Consume the head forwarded request, recording its network delay (bucket 2).
263 action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
264 profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
// Record a miss for the request at the head of the mandatory queue.
267 action(p_profileMiss, "p", desc="Profile cache miss") {
268 peek(mandatoryQueue_in, CacheMsg) {
269 cacheMemory.profileMiss(in_msg);
// Complete a load/ifetch: hand the cached data block back to the sequencer.
273 action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
274 DEBUG_EXPR(cacheMemory[address].DataBlk);
275 sequencer.readCallback(address, cacheMemory[address].DataBlk);
// Complete a store: let the sequencer apply the write to the cached block.
278 action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
279 DEBUG_EXPR(cacheMemory[address].DataBlk);
280 sequencer.writeCallback(address, cacheMemory[address].DataBlk);
// Install the data block from the head response message into the cache.
284 action(u_writeDataToCache, "u", desc="Write data to the cache") {
285 peek(responseNetwork_in, ResponseMsg) {
286 cacheMemory[address].DataBlk := in_msg.DataBlk;
// Begin tracking an in-flight transaction for this address.
291 action(v_allocateTBE, "v", desc="Allocate TBE") {
292 TBEs.allocate(address);
// The transaction is finished: release its TBE.
296 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
297 TBEs.deallocate(address);
// Preserve the data in the TBE so the cache block can be freed while the
// writeback is still outstanding.
300 action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
301 TBEs[address].DataBlk := cacheMemory[address].DataBlk;
304 action(z_stall, "z", desc="stall") {
// TRANSITIONS
// New processor requests and replacements arriving while a transaction is
// already in flight for the line cannot be serviced yet.
310 transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
314 transition({IS, IM}, {Fwd_GETX, Inv}) {
// Invalidate while a writeback (PUT) is already in flight for the line.
318 transition(MI, Inv) {
319 o_popForwardedRequestQueue;
// Hits in M: the processor is serviced directly from the cache.
322 transition(M, Store) {
327 transition(M, {Load, Ifetch}) {
333 o_popForwardedRequestQueue;
// Misses in I: allocate a block and wait in a transient state for Data.
336 transition(I, Store, IM) {
338 i_allocateL1CacheBlock;
344 transition(I, {Load, Ifetch}, IS) {
346 i_allocateL1CacheBlock;
// Fills: Data from the directory completes the miss and moves the line to M.
352 transition(IS, Data, M) {
359 transition(IM, Data, M) {
// Another cache wants the line exclusively: give it up and go Invalid.
366 transition(M, Fwd_GETX, I) {
368 o_popForwardedRequestQueue;
// Evicting a line that is already Invalid only frees the block.
371 transition(I, Replacement) {
372 h_deallocateL1CacheBlock;
// Writeback: stash the data in a TBE, free the cache block, and wait in MI
// for the directory's ack/nack.
375 transition(M, {Replacement,Inv}, MI) {
378 x_copyDataFromCacheToTBE;
379 h_deallocateL1CacheBlock;
382 transition(MI, Writeback_Ack, I) {
384 o_popForwardedRequestQueue;
// Writeback races: a forwarded GETX or a nack arrives while the PUT is in
// flight; II / MII track the remaining steps until the line is Invalid.
387 transition(MI, Fwd_GETX, II) {
389 o_popForwardedRequestQueue;
392 transition(MI, Writeback_Nack, MII) {
393 o_popForwardedRequestQueue;
396 transition(MII, Fwd_GETX, I) {
399 o_popForwardedRequestQueue;
402 transition(II, Writeback_Nack, I) {
404 o_popForwardedRequestQueue;