// NOTE(review): this excerpt has the original file's line numbers fused onto
// each line, and interior lines are missing wherever the numbering skips —
// several bodies and closing braces below are therefore not visible.
// L1 controller for the MI example protocol: one writable state (M),
// write-invalidate, directory-backed.
2 machine(L1Cache, "MI Example L1 Cache")
// Latency (in cycles) used when sending data responses; remaining
// parameters (e.g. issue_latency, used by the request actions below)
// are on lines not visible in this excerpt.
3 : int cache_response_latency,
// Outbound virtual networks: requests (GETX/PUTX) on vnet 0,
// data responses on vnet 1. All buffers are ordered.
8 MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
9 MessageBuffer responseFromCache, network="To", virtual_network="1", ordered="true";
// Inbound: directory forwards (Fwd_GETX/INV/WB_ACK/WB_NACK) on vnet 2,
// data responses on vnet 1.
11 MessageBuffer forwardToCache, network="From", virtual_network="2", ordered="true";
12 MessageBuffer responseToCache, network="From", virtual_network="1", ordered="true";
// Controller states. The stable Modified state (M) is referenced throughout
// the transitions below, but its declaration line is missing from this
// excerpt (numbering skips 18).
15 enumeration(State, desc="Cache states") {
16 I, desc="Not Present/Invalid";
// Transient: PUT issued, then ownership lost to a forwarded GETX
// (see the MI -> II transition below); awaiting Writeback_Nack.
17 II, desc="Not Present/Invalid, issued PUT";
// Transient: victimized/invalidated while Modified, PUTX in flight.
19 MI, desc="Modified, issued PUT";
// Transient: GETX in flight for a read miss.
21 IS, desc="Issued request for LOAD/IFETCH";
// Transient: GETX in flight for a write miss.
22 IM, desc="Issued request for STORE/ATOMIC";
// Events that drive the state machine, triggered by the in_ports below.
26 enumeration(Event, desc="Cache events") {
// Core-side requests, delivered via the mandatory queue.
29 Load, desc="Load request from processor";
30 Ifetch, desc="Ifetch request from processor";
31 Store, desc="Store request from processor";
// Network-side events.
33 Data, desc="Data from network";
34 Fwd_GETX, desc="Forward from network";
36 Inv, desc="Invalidate request from dir";
// Internal event: victimize a block to make room (see mandatoryQueue_in).
38 Replacement, desc="Replace a block";
// Directory's answer to our PUTX: ack if we still own the line,
// nack if a forwarded GETX already took the data (II state).
39 Writeback_Ack, desc="Ack from the directory for a writeback";
40 Writeback_Nack, desc="Nack from the directory for a writeback";
43 // STRUCTURE DEFINITIONS
// Queue of CPU requests (CacheMsg) from the sequencer; consumed by
// mandatoryQueue_in below. Unordered, unlike the network buffers.
45 MessageBuffer mandatoryQueue, ordered="false";
// Sequencer handle used by r_load_hit / s_store_hit to complete requests.
46 Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
// Per-line cache entry: coherence state, dirty bit, and the data itself.
49 structure(Entry, desc="...", interface="AbstractCacheEntry") {
50 State CacheState, desc="cache state";
// NOTE(review): Dirty is declared but never read or written in the
// visible portion of this file.
51 bool Dirty, desc="Is the data dirty (different than memory)?";
52 DataBlock DataBlk, desc="Data in the block";
// Interface to the C++ cache array backing this controller.
56 external_type(CacheMemory) {
// True if a line at this address could be allocated without eviction.
57 bool cacheAvail(Address);
// Returns the victim address that would be evicted to make room.
58 Address cacheProbe(Address);
59 void allocate(Address, Entry);
60 void deallocate(Address);
61 Entry lookup(Address);
// Sets the access permission checked by the CPU-side fast path
// (driven from setState: Read_Write in M, Invalid otherwise).
62 void changePermission(Address, AccessPermission);
63 bool isTagPresent(Address);
// Statistics hook, called on every miss (see p_profileMiss).
64 void profileMiss(CacheMsg);
// Transaction Buffer Entry: per-address bookkeeping for in-flight
// transactions (allocated/deallocated by the v_/w_ actions).
68 structure(TBE, desc="...") {
69 State TBEState, desc="Transient state";
// Holds the evicted line's data so a forwarded GETX can still be
// answered after the cache block is deallocated (ee_sendDataFromTBE).
70 DataBlk, desc="data for the block, required for concurrent writebacks";
// Interface to the C++ TBE table keyed by address.
73 external_type(TBETable) {
75 void allocate(Address);
76 void deallocate(Address);
77 bool isPresent(Address);
// The cache array and TBE table instances used by all functions,
// ports, and actions below.
83 CacheMemory cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
85 TBETable TBEs, template_hack="<L1Cache_TBE>";
// Maps a CPU request type to the controller event that drives the state
// machine. NOTE(review): the return statements (lines 92/94/96) are not
// visible in this excerpt — presumably Event:Load / Event:Ifetch /
// Event:Store respectively; confirm against the full file.
90 Event mandatory_request_type_to_event(CacheRequestType type) {
91 if (type == CacheRequestType:LD) {
93 } else if (type == CacheRequestType:IFETCH) {
// Stores and atomics both take the write path (GETX).
95 } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
// Unknown request types are fatal.
98 error("Invalid CacheRequestType");
// Returns the current state for an address. A TBE's transient state takes
// priority over the cache entry's state; the fallthrough for an address in
// neither structure is on lines not visible here (presumably State:I).
103 State getState(Address addr) {
105 if (TBEs.isPresent(addr)) {
106 return TBEs[addr].TBEState;
108 else if (cacheMemory.isTagPresent(addr)) {
109 return cacheMemory[addr].CacheState;
// Writes the new state into the TBE (if one exists) and the cache entry
// (if one exists), and keeps the CPU-visible access permission in sync:
// only M grants Read_Write — every other state is Invalid, so all other
// accesses fault into this controller.
116 void setState(Address addr, State state) {
118 if (TBEs.isPresent(addr)) {
119 TBEs[addr].TBEState := state;
122 if (cacheMemory.isTagPresent(addr)) {
123 cacheMemory[addr].CacheState := state;
124 if (state == State:M) {
125 cacheMemory.changePermission(addr, AccessPermission:Read_Write);
127 cacheMemory.changePermission(addr, AccessPermission:Invalid);
// Outbound ports: coherence requests (GETX/PUTX) to the directory, and
// data responses to forwarded requestors.
135 out_port(requestNetwork_out, RequestMsg, requestFromCache);
136 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
// Inbound port for directory-forwarded messages: translates each message
// type into the corresponding controller event at the message's address.
138 in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
139 if (forwardRequestNetwork_in.isReady()) {
140 peek(forwardRequestNetwork_in, RequestMsg) {
// Another cache wants exclusive access to a line we own.
141 if (in_msg.Type == CoherenceRequestType:GETX) {
142 trigger(Event:Fwd_GETX, in_msg.Address);
// Directory replies to our PUTX arrive on this network too.
144 else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
145 trigger(Event:Writeback_Ack, in_msg.Address);
147 else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
148 trigger(Event:Writeback_Nack, in_msg.Address);
150 else if (in_msg.Type == CoherenceRequestType:INV) {
151 trigger(Event:Inv, in_msg.Address);
// Any other message type on this network is a protocol bug.
154 error("Unexpected message");
// Inbound port for data responses: only DATA is legal here; it fires the
// Data event that completes IS/IM misses.
160 in_port(responseNetwork_in, ResponseMsg, responseToCache) {
161 if (responseNetwork_in.isReady()) {
162 peek(responseNetwork_in, ResponseMsg) {
163 if (in_msg.Type == CoherenceResponseType:DATA) {
164 trigger(Event:Data, in_msg.Address);
167 error("Unexpected message");
// Inbound port for CPU requests. If the line is absent AND there is no
// room in its set, a Replacement event is triggered on the VICTIM address
// (cacheProbe) first — the original request stays queued and is retried
// once the victim is gone. Otherwise the request is translated to a
// Load/Ifetch/Store event on its own line address. The branch structure
// between these two cases spans lines not visible in this excerpt.
174 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
175 if (mandatoryQueue_in.isReady()) {
176 peek(mandatoryQueue_in, CacheMsg) {
179 if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
180 cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
181 // make room for the block
182 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
185 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
// Send a GETX to this line's home directory. Used for both read and write
// misses — MI has no shared state, so every miss requests exclusive.
// issue_latency is a controller parameter declared on lines not visible
// in this excerpt.
193 action(a_issueRequest, "a", desc="Issue a request") {
194 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
195 out_msg.Address := address;
196 out_msg.Type := CoherenceRequestType:GETX;
197 out_msg.Requestor := machineID;
198 out_msg.Destination.add(map_Address_to_Directory(address));
// Control-sized: no data travels with a GETX.
199 out_msg.MessageSize := MessageSizeType:Control;
// Send a PUTX (writeback) carrying the modified data to the directory.
// Must run while the cache entry still exists — it reads
// cacheMemory[address].DataBlk directly.
203 action(b_issuePUT, "b", desc="Issue a PUT request") {
204 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
205 out_msg.Address := address;
206 out_msg.Type := CoherenceRequestType:PUTX;
207 out_msg.Requestor := machineID;
208 out_msg.Destination.add(map_Address_to_Directory(address));
209 out_msg.DataBlk := cacheMemory[address].DataBlk;
210 out_msg.MessageSize := MessageSizeType:Data;
// Answer a forwarded GETX with the data from the CACHE. Peeks the
// forwarded request (still at the head of forwardRequestNetwork_in) to
// learn the requestor's ID.
215 action(e_sendData, "e", desc="Send data from cache to requestor") {
216 peek(forwardRequestNetwork_in, RequestMsg) {
217 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
218 out_msg.Address := address;
219 out_msg.Type := CoherenceResponseType:DATA;
220 out_msg.Sender := machineID;
// Data goes directly cache-to-cache, not via the directory.
221 out_msg.Destination.add(in_msg.Requestor);
222 out_msg.DataBlk := cacheMemory[address].DataBlk;
223 out_msg.MessageSize := MessageSizeType:Response_Data;
// Answer a forwarded GETX with the data from the TBE. Used when the cache
// block was already deallocated for a writeback (MI state) — the data was
// preserved in the TBE by x_copyDataFromCacheToTBE.
228 action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
229 peek(forwardRequestNetwork_in, RequestMsg) {
230 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
231 out_msg.Address := address;
232 out_msg.Type := CoherenceResponseType:DATA;
233 out_msg.Sender := machineID;
234 out_msg.Destination.add(in_msg.Requestor);
// Only difference from e_sendData: source is the TBE, not the cache.
235 out_msg.DataBlk := TBEs[address].DataBlk;
236 out_msg.MessageSize := MessageSizeType:Response_Data;
// Allocate a cache entry for this address; idempotent — skipped if the
// tag is already present.
242 action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
243 if (cacheMemory.isTagPresent(address) == false) {
244 cacheMemory.allocate(address, new Entry);
// Free the cache entry; idempotent — skipped if the tag is absent.
248 action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
249 if (cacheMemory.isTagPresent(address) == true) {
250 cacheMemory.deallocate(address);
// Consume the CPU request at the head of the mandatory queue.
254 action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
255 mandatoryQueue_in.dequeue();
// Consume the head response and record its network delay (vnet 1).
258 action(n_popResponseQueue, "n", desc="Pop the response queue") {
259 profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
// Consume the head forwarded request and record its delay (vnet 2).
262 action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
263 profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
// Record a miss for statistics, passing the CPU request message through
// to the cache's profiling hook.
266 action(p_profileMiss, "p", desc="Profile cache miss") {
267 peek(mandatoryQueue_in, CacheMsg) {
268 cacheMemory.profileMiss(in_msg);
// Complete a load/ifetch: hand the cached data block back to the sequencer.
272 action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
273 DEBUG_EXPR(cacheMemory[address].DataBlk);
274 sequencer.readCallback(address, cacheMemory[address].DataBlk);
// Complete a store: the sequencer writes the store data into the block
// passed by reference here.
277 action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
278 DEBUG_EXPR(cacheMemory[address].DataBlk);
279 sequencer.writeCallback(address, cacheMemory[address].DataBlk);
// Copy the data from a network DATA response into the cache entry.
283 action(u_writeDataToCache, "u", desc="Write data to the cache") {
284 peek(responseNetwork_in, ResponseMsg) {
285 cacheMemory[address].DataBlk := in_msg.DataBlk;
// Create the per-address transaction record.
290 action(v_allocateTBE, "v", desc="Allocate TBE") {
291 TBEs.allocate(address);
// Retire the transaction record.
295 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
296 TBEs.deallocate(address);
// Preserve the line's data in the TBE before the cache block is freed,
// so a racing Fwd_GETX can still be answered (ee_sendDataFromTBE).
299 action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
300 TBEs[address].DataBlk := cacheMemory[address].DataBlk;
// Leave the triggering message on its queue; body (if any) not visible.
303 action(z_stall, "z", desc="stall") {
// TRANSITIONS
// NOTE(review): many action lines inside these transitions fall on lines
// not visible in this excerpt, so several bodies appear empty or partial
// below — comments hedge accordingly.

// While any transaction is in flight, stall new core requests for the
// line (body not visible; presumably z_stall).
309 transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
// Likewise stall external requests that race with our outstanding GETX.
313 transition({IS, IM}, {Fwd_GETX, Inv}) {
// An INV that races with our in-flight PUTX carries no obligation —
// just consume it.
317 transition(MI, Inv) {
318 o_popForwardedRequestQueue;
// Store hit in M (body not visible; presumably s_store_hit +
// m_popMandatoryQueue).
321 transition(M, Store) {
// Load/ifetch hit in M (body not visible; presumably r_load_hit +
// m_popMandatoryQueue).
326 transition(M, {Load, Ifetch}) {
// NOTE(review): this action appears to belong to a transition whose
// header line (~330) is missing from this excerpt.
332 o_popForwardedRequestQueue;
// Write miss: allocate the block and go transient until Data arrives
// (remaining actions, e.g. issuing the GETX, not visible).
335 transition(I, Store, IM) {
337 i_allocateL1CacheBlock;
// Read miss: same shape as the write miss.
343 transition(I, {Load, Ifetch}, IS) {
345 i_allocateL1CacheBlock;
// Data arrives for a read miss: fill, complete, go to M (body not
// visible).
351 transition(IS, Data, M) {
// Data arrives for a write miss: fill, complete the store, go to M
// (body not visible).
358 transition(IM, Data, M) {
// Lose ownership to another cache: send it our data (e_sendData, on a
// line not visible) and invalidate.
365 transition(M, Fwd_GETX, I) {
367 o_popForwardedRequestQueue;
// Replacing a clean-invalid line needs no writeback — just free it.
370 transition(I, Replacement) {
371 h_deallocateL1CacheBlock;
// Victimize/invalidate a Modified line: stash the data in a TBE, free
// the block, and wait in MI for the directory's writeback answer
// (TBE allocation and PUTX issue are on lines not visible).
374 transition(M, {Replacement,Inv}, MI) {
377 x_copyDataFromCacheToTBE;
378 h_deallocateL1CacheBlock;
// Directory accepted our writeback (TBE deallocation not visible).
381 transition(MI, Writeback_Ack, I) {
383 o_popForwardedRequestQueue;
// A GETX beat our PUTX to the directory: supply the data from the TBE
// (ee_sendDataFromTBE, not visible) and await the Writeback_Nack.
386 transition(MI, Fwd_GETX, II) {
388 o_popForwardedRequestQueue;
// Directory rejected the stale PUTX — the transaction is over
// (TBE deallocation not visible).
391 transition(II, Writeback_Nack, I) {
393 o_popForwardedRequestQueue;