// gem5: src/mem/protocol/MI_example-cache.sm
// MI Example: a minimal two-state (Modified/Invalid) L1 cache controller.
// Every processor request acquires the block in Modified via GETX; there is
// no shared state.  Transient states cover outstanding GETX (IS/IM) and
// outstanding PUTX writebacks racing with forwarded requests (MI/MII/II).
machine(L1Cache, "MI Example L1 Cache")
: int cache_response_latency,
  int issue_latency
{

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
  MessageBuffer responseFromCache, network="To", virtual_network="1", ordered="true";

  MessageBuffer forwardToCache, network="From", virtual_network="2", ordered="true";
  MessageBuffer responseToCache, network="From", virtual_network="1", ordered="true";

  // STATES
  enumeration(State, desc="Cache states") {
    I, desc="Not Present/Invalid";
    II, desc="Not Present/Invalid, issued PUT";
    M, desc="Modified";
    MI, desc="Modified, issued PUT";
    MII, desc="Modified, issued PUTX, received nack";

    IS, desc="Issued request for LOAD/IFETCH";
    IM, desc="Issued request for STORE/ATOMIC";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor
    Load, desc="Load request from processor";
    Ifetch, desc="Ifetch request from processor";
    Store, desc="Store request from processor";

    // From network
    Data, desc="Data from network";
    Fwd_GETX, desc="Forward from network";

    Inv, desc="Invalidate request from dir";

    // Internal / directory responses
    Replacement, desc="Replace a block";
    Writeback_Ack, desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";
  Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  external_type(CacheMemory) {
    bool cacheAvail(Address);
    Address cacheProbe(Address);
    void allocate(Address, Entry);
    void deallocate(Address);
    Entry lookup(Address);
    void changePermission(Address, AccessPermission);
    bool isTagPresent(Address);
    void profileMiss(CacheMsg);
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState, desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  external_type(TBETable) {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // STRUCTURES

  CacheMemory cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';

  TBETable TBEs, template_hack="<L1Cache_TBE>";

  // FUNCTIONS

  // Map an incoming processor request type onto a protocol event.
  // Stores and atomics are treated identically: both need Modified.
  Event mandatory_request_type_to_event(CacheRequestType type) {
    if (type == CacheRequestType:LD) {
      return Event:Load;
    } else if (type == CacheRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid CacheRequestType");
    }
  }

  // A TBE entry (transient state) takes precedence over the cache tag;
  // an address with neither is Invalid.
  State getState(Address addr) {
    if (TBEs.isPresent(addr)) {
      return TBEs[addr].TBEState;
    }
    else if (cacheMemory.isTagPresent(addr)) {
      return cacheMemory[addr].CacheState;
    }
    else {
      return State:I;
    }
  }

  void setState(Address addr, State state) {
    if (TBEs.isPresent(addr)) {
      TBEs[addr].TBEState := state;
    }

    if (cacheMemory.isTagPresent(addr)) {
      cacheMemory[addr].CacheState := state;
      // Only stable Modified grants access; every transient state is
      // treated as Invalid so racing accesses stall in the sequencer.
      if (state == State:M) {
        cacheMemory.changePermission(addr, AccessPermission:Read_Write);
      } else {
        cacheMemory.changePermission(addr, AccessPermission:Invalid);
      }
    }
  }

  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  // Forwarded requests and writeback acks/nacks from the directory.
  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.Address);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Address);
        }
        else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Address);
        }
        else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Address);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Data responses from the directory.
  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Address);
        }
        else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue: requests from the processor via the sequencer.
  in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, CacheMsg) {
        // If the block is absent and no way is available, first trigger
        // a Replacement on the victim chosen by cacheProbe(); the
        // original request stays queued and is retried afterwards.
        if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
        }
        else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
        }
      }
    }
  }

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      // PUTX carries the dirty data back to the directory.
      out_msg.DataBlk := cacheMemory[address].DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cacheMemory[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  // Used when the cache block has already been deallocated (writeback in
  // flight) and the data only survives in the TBE.
  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := TBEs[address].DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (cacheMemory.isTagPresent(address) == false) {
      cacheMemory.allocate(address, new Entry);
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
    if (cacheMemory.isTagPresent(address) == true) {
      cacheMemory.deallocate(address);
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
  }

  action(p_profileMiss, "p", desc="Profile cache miss") {
    peek(mandatoryQueue_in, CacheMsg) {
      cacheMemory.profileMiss(in_msg);
    }
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    DEBUG_EXPR(cacheMemory[address].DataBlk);
    sequencer.readCallback(address, cacheMemory[address].DataBlk);
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    DEBUG_EXPR(cacheMemory[address].DataBlk);
    sequencer.writeCallback(address, cacheMemory[address].DataBlk);
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      cacheMemory[address].DataBlk := in_msg.DataBlk;
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    TBEs[address].DataBlk := cacheMemory[address].DataBlk;
  }

  action(z_stall, "z", desc="stall") {
    // do nothing
  }

  // TRANSITIONS

  // Processor requests against a block with an outstanding transaction
  // stall until the transaction completes.
  transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  // Invalidate that raced with our PUTX: the writeback already carries
  // the data, so just consume the message.
  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    r_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    s_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  // Evict (or invalidate) a Modified block: stash the data in a TBE,
  // send PUTX, and wait for the directory's ack/nack in MI.
  transition(M, {Replacement,Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    h_deallocateL1CacheBlock;
  }

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  // A GETX beat our PUTX to the directory: supply data from the TBE and
  // wait in II for the directory to nack the stale writeback.
  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  // Writeback nacked first: a forwarded GETX must still be coming.
  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}
407