// merged Tushar's bug fix with public repository changes
// [gem5.git] / src / mem / protocol / MI_example-cache.sm
1
// MI Example: a minimal two-stable-state (M/I) L1 cache controller.
// Parameters (set from the Ruby configuration):
//   cache_response_latency - cycles before data is sent to a requestor
//   issue_latency          - cycles before a request enters the network
machine(L1Cache, "MI Example L1 Cache")
: int cache_response_latency,
  int issue_latency
{

  // NETWORK BUFFERS
  // Virtual network usage: vnet 0 carries requests to the directory,
  // vnet 1 carries data responses, vnet 2 carries forwarded requests
  // and writeback acks/nacks from the directory.  All are ordered.
  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
  MessageBuffer responseFromCache, network="To", virtual_network="1", ordered="true";

  MessageBuffer forwardToCache, network="From", virtual_network="2", ordered="true";
  MessageBuffer responseToCache, network="From", virtual_network="1", ordered="true";
13
  // STATES
  // Stable states are I (invalid) and M (modified, read/write).
  // Transient states track in-flight transactions: IS/IM wait for data
  // after a GETX; MI/II wait for the directory to ack/nack a PUT.
  enumeration(State, desc="Cache states") {
    I, desc="Not Present/Invalid";
    II, desc="Not Present/Invalid, issued PUT";
    M, desc="Modified";
    MI, desc="Modified, issued PUT";

    IS, desc="Issued request for LOAD/IFETCH";
    IM, desc="Issued request for STORE/ATOMIC";
  }
24
  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor (via the mandatory queue)

    Load, desc="Load request from processor";
    Ifetch, desc="Ifetch request from processor";
    Store, desc="Store request from processor";

    // From the network
    Data, desc="Data from network";
    Fwd_GETX, desc="Forward from network";

    Inv, desc="Invalidate request from dir";

    // Internally generated when a set is full
    Replacement, desc="Replace a block";
    Writeback_Ack, desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }
42
  // STRUCTURE DEFINITIONS

  // Demand requests from the processor arrive here via the sequencer.
  MessageBuffer mandatoryQueue, ordered="false";
  Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';

  // CacheEntry: per-block state stored in the cache array.
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }
54
55
  // Interface to the C++ cache array implementation.
  external_type(CacheMemory) {
    bool cacheAvail(Address);            // true if a block can be allocated without eviction
    Address cacheProbe(Address);         // victim address that would be evicted
    void allocate(Address, Entry);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
    void profileMiss(CacheMsg);
  }
66
  // TBE fields
  // Transaction Buffer Entry: tracks one outstanding miss or writeback.
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  // Interface to the C++ TBE table implementation.
  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
79
80
  // STRUCTURES

  // The tag/data array for this controller, built by the config system.
  CacheMemory cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';

  // Table of outstanding transactions.
  TBETable TBEs, template_hack="<L1Cache_TBE>";
86
87
88
  // FUNCTIONS

  // Maps an incoming processor request type to the controller event
  // that drives the transition table.  ST and ATOMIC both map to
  // Store; any other type is a protocol error.
  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }
101
102
  // Returns the current protocol state for addr.  A TBE's transient
  // state takes precedence over the cache entry's stable state; a
  // block present in neither structure is Invalid.
  State getState(Address addr) {

    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    }
    else if (cacheMemory.isTagPresent(addr)) {
      return cacheMemory[addr].CacheState;
    }
    else {
      return State:I;
    }
  }
115
  // Records the new state in the TBE and/or cache entry (whichever
  // exists) and keeps the cache entry's access permission in sync:
  // only M grants Read_Write; every other state maps to Invalid.
  void setState(Address addr, State state) {

    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (cacheMemory.isTagPresent(addr)) {
      cacheMemory[addr].CacheState := state;
      if (state == State:M) {
        cacheMemory.changePermission(addr, AccessPermission:Read_Write);
      } else {
        cacheMemory.changePermission(addr, AccessPermission:Invalid);
      }
    }
  }
131
132
  // NETWORK PORTS

  // Outgoing: requests (GETX/PUTX) to the directory, data responses
  // to other requestors.
  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);
137
  // Incoming forwarded requests, invalidations, and writeback
  // acks/nacks from the directory.
  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.Address);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }
159
  // Incoming data responses; DATA is the only expected message type.
  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }
172
  // Mandatory Queue
  // Processor requests.  If the target block is not resident and its
  // set has no free way, trigger a Replacement on the victim chosen by
  // cacheProbe first; the original request stays queued and is
  // re-examined after the victim is evicted.
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {


        if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
        }
      }
    }
  }
190
  // ACTIONS

  // Issue a GETX to the home directory for the current address.
  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  // Issue a PUTX (writeback) carrying the cache block's data.
  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cacheMemory[address].DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }
213
214
  // Supply data to the requestor named in the forwarded GETX, reading
  // it from the cache entry (block still resident).
  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cacheMemory[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // Same as e_sendData, but the block was already deallocated for a
  // writeback, so the data comes from the TBE instead of the cache.
  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
240
241
  // Allocate a fresh cache entry for address (no-op if already present).
  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (cacheMemory.isTagPresent(address) == false) {
      cacheMemory.allocate(address, new Entry);
    }
  }

  // Free the cache entry for address (no-op if not present).
  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (cacheMemory.isTagPresent(address) == true) {
      cacheMemory.deallocate(address);
    }
  }
253
  // Consume the head of the mandatory (processor) request queue.
  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  // Consume the head of the response queue, recording its delay.
  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
  }

  // Consume the head of the forwarded-request queue, recording its delay.
  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
  }

  // Record a miss for the request at the head of the mandatory queue.
  action(p_profileMiss, "p", desc="Profile cache miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      cacheMemory.profileMiss(in_msg);
    }
  }
271
  // Complete a load/ifetch: hand the block's data to the sequencer.
  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    DEBUG_EXPR(cacheMemory[address].DataBlk);
    sequencer.readCallback(address, cacheMemory[address].DataBlk);
  }

  // Complete a store: let the sequencer write into the block's data.
  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    DEBUG_EXPR(cacheMemory[address].DataBlk);
    sequencer.writeCallback(address, cacheMemory[address].DataBlk);
  }
281
282
  // Copy the data payload of the response at the head of the response
  // queue into the cache entry.
  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      cacheMemory[address].DataBlk := in_msg.DataBlk;
    }
  }


  // Begin tracking an outstanding transaction for address.
  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
  }


  // The transaction is finished; release its TBE.
  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  // Preserve the dirty block in the TBE so it can still be supplied
  // after the cache entry is deallocated for a writeback.
  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    TBEs[address].DataBlk := cacheMemory[address].DataBlk;
  }

  // Leave the triggering message on its queue; it will recycle.
  action(z_stall, "z", desc="stall") {
    // do nothing
  }
306
  // TRANSITIONS

  // Requests arriving while a transaction is in flight are stalled
  // (left on their queue) until the transient state resolves.
  transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  // Our PUT is already in flight, so this invalidate is stale; drop it.
  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  // Hits in M are serviced directly from the cache.
  transition(M, Store) {
    s_store_hit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    m_popMandatoryQueue;
  }

  // Nothing resident to invalidate; just consume the message.
  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  // Misses: allocate a TBE and a cache block, then issue GETX and wait
  // for data in a transient state.
  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  // Data response completes the miss; fill the block, notify the
  // sequencer, and retire the transaction.
  transition(IS, Data, M) {
    u_writeDataToCache;
    r_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    s_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  // Ownership moves to the forwarded requestor: supply data, go Invalid.
  transition(M, Fwd_GETX, I) {
    e_sendData;
    o_popForwardedRequestQueue;
  }

  // Evicting an invalid block just frees the entry; the stalled
  // processor request retries once room exists.
  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  // Writeback: stash the dirty data in the TBE, issue PUTX, and free
  // the cache entry while waiting for the directory's ack.
  transition(M, {Replacement,Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    h_deallocateL1CacheBlock;
  }

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  // A GETX raced ahead of our PUTX at the directory: supply the data
  // from the TBE and wait in II for the directory to nack the PUT.
  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}
396