ruby: Converted old ruby debug calls to M5 debug calls
[gem5.git] / src / mem / protocol / MI_example-cache.sm
1
2 machine(L1Cache, "MI Example L1 Cache")
3 : Sequencer * sequencer,
4 CacheMemory * cacheMemory,
5 int cache_response_latency = 12,
6 int issue_latency = 2
7 {
8
9 // NETWORK BUFFERS
// Outbound buffers: demand requests (GETX/PUTX) go to the directory on
// virtual network 2; data responses to other requestors on network 4.
10 MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true";
11 MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true";
12
// Inbound buffers: forwarded requests / writeback acks from the directory
// arrive on network 3; data responses on network 4. All are ordered, so
// messages on the same virtual network cannot be reordered in flight.
13 MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true";
14 MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true";
15
16 // STATES
17 enumeration(State, desc="Cache states") {
// I and M are the only stable states in this simple MI protocol.
18 I, desc="Not Present/Invalid";
// II: block already handed off via Fwd_GETX while the PUT was in flight;
// waiting for the directory's (n)ack.
19 II, desc="Not Present/Invalid, issued PUT";
20 M, desc="Modified";
// MI: writeback (PUTX) in flight. MII: the PUTX was nacked, so a
// forwarded GETX must still be serviced from the TBE copy of the data.
21 MI, desc="Modified, issued PUT";
22 MII, desc="Modified, issued PUTX, received nack";
23
// IS/IM: a GETX has been issued to the directory (this protocol requests
// exclusive even for loads); waiting for data to arrive.
24 IS, desc="Issued request for LOAD/IFETCH";
25 IM, desc="Issued request for STORE/ATOMIC";
26 }
27
28 // EVENTS
29 enumeration(Event, desc="Cache events") {
30 // From processor
31
// Triggered from the mandatory queue by the sequencer's demand requests
// (ATOMIC is mapped onto Store in mandatory_request_type_to_event).
32 Load, desc="Load request from processor";
33 Ifetch, desc="Ifetch request from processor";
34 Store, desc="Store request from processor";
35
// From the response network (DATA) and the forward network (GETX
// forwarded by the directory on behalf of another requestor).
36 Data, desc="Data from network";
37 Fwd_GETX, desc="Forward from network";
38
39 Inv, desc="Invalidate request from dir";
40
// Replacement is triggered internally when a miss needs a victim's frame.
41 Replacement, desc="Replace a block";
42 Writeback_Ack, desc="Ack from the directory for a writeback";
43 Writeback_Nack, desc="Nack from the directory for a writeback";
44 }
45
46 // STRUCTURE DEFINITIONS
47
// Queue of demand requests pushed by the sequencer (processor side).
48 MessageBuffer mandatoryQueue, ordered="false";
49
50 // CacheEntry
// Per-block cache state; extends the generic AbstractCacheEntry so the
// CacheMemory container can hold it.
51 structure(Entry, desc="...", interface="AbstractCacheEntry") {
52 State CacheState, desc="cache state";
53 bool Dirty, desc="Is the data dirty (different than memory)?";
54 DataBlock DataBlk, desc="Data in the block";
55 }
56
57
58 // TBE fields
// Transaction Buffer Entry: holds the transient state for an in-flight
// miss or writeback. DataBlk keeps a copy of the evicted data so a
// forwarded GETX can still be serviced after the cache frame is freed.
59 structure(TBE, desc="...") {
60 State TBEState, desc="Transient state";
61 DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
62 }
63
// C++-implemented table of TBEs, indexed by block address.
64 external_type(TBETable) {
65 TBE lookup(Address);
66 void allocate(Address);
67 void deallocate(Address);
68 bool isPresent(Address);
69 }
70
71
72 // STRUCTURES
73
74 TBETable TBEs, template_hack="<L1Cache_TBE>";
75
76
77
78 // FUNCTIONS
// Map an incoming processor request type onto this machine's Event
// enumeration. ATOMICs are treated like stores since both need exclusive
// permission. Any other request type is a fatal protocol error.
79 Event mandatory_request_type_to_event(CacheRequestType type) {
80 if (type == CacheRequestType:LD) {
81 return Event:Load;
82 } else if (type == CacheRequestType:IFETCH) {
83 return Event:Ifetch;
84 } else if ((type == CacheRequestType:ST) || (type == CacheRequestType:ATOMIC)) {
85 return Event:Store;
86 } else {
87 error("Invalid CacheRequestType");
88 }
89 }
90
// Look up the entry for addr in the cache and downcast it from the
// generic AbstractCacheEntry to this machine's Entry type.
91 Entry getCacheEntry(Address addr), return_by_ref="yes" {
92 return static_cast(Entry, cacheMemory[addr]);
93 }
94
// Return the protocol state for addr. A pending TBE takes priority
// (the transient state lives there); otherwise fall back to the cache
// entry; blocks tracked by neither are Invalid by definition.
95 State getState(Address addr) {
96
97 if (TBEs.isPresent(addr)) {
98 return TBEs[addr].TBEState;
99 }
100 else if (cacheMemory.isTagPresent(addr)) {
101 return getCacheEntry(addr).CacheState;
102 }
103 else {
104 return State:I;
105 }
106 }
107
// Record the new state for addr in whichever structures track it (TBE
// and/or cache entry), and keep the cache frame's access permission in
// sync: only M is readable/writable, every other state maps to Invalid.
108 void setState(Address addr, State state) {
109
110 if (TBEs.isPresent(addr)) {
111 TBEs[addr].TBEState := state;
112 }
113
114 if (cacheMemory.isTagPresent(addr)) {
115 getCacheEntry(addr).CacheState := state;
116 if (state == State:M) {
117 cacheMemory.changePermission(addr, AccessPermission:Read_Write);
118 } else {
119 cacheMemory.changePermission(addr, AccessPermission:Invalid);
120 }
121 }
122 }
123
// Classify which kind of machine supplied the data for an externally
// satisfied request — presumably so the sequencer can attribute the hit
// in its statistics (TODO confirm against Sequencer::readCallback).
// A peer L1 implies cache-to-cache transfer, hence L1Cache_wCC.
124 GenericMachineType getNondirectHitMachType(MachineID sender) {
125 if (machineIDToMachineType(sender) == MachineType:L1Cache) {
126 //
127 // NOTE direct local hits should not call this
128 //
129 return GenericMachineType:L1Cache_wCC;
130 } else {
131 return ConvertMachToGenericMach(machineIDToMachineType(sender));
132 }
133 }
134
135
136 // NETWORK PORTS
137
138 out_port(requestNetwork_out, RequestMsg, requestFromCache);
139 out_port(responseNetwork_out, ResponseMsg, responseFromCache);
140
// Forwarded requests and writeback (n)acks from the directory. Each
// message type maps onto one trigger event; block_on="Address" stalls
// further messages for a blocked address (NOTE(review): standard SLICC
// per-address blocking semantics — confirm against the SLICC docs).
141 in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
142 if (forwardRequestNetwork_in.isReady()) {
143 peek(forwardRequestNetwork_in, RequestMsg, block_on="Address") {
144 if (in_msg.Type == CoherenceRequestType:GETX) {
145 trigger(Event:Fwd_GETX, in_msg.Address);
146 }
147 else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
148 trigger(Event:Writeback_Ack, in_msg.Address);
149 }
150 else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
151 trigger(Event:Writeback_Nack, in_msg.Address);
152 }
153 else if (in_msg.Type == CoherenceRequestType:INV) {
154 trigger(Event:Inv, in_msg.Address);
155 }
156 else {
157 error("Unexpected message");
158 }
159 }
160 }
161 }
162
// Data responses from the directory (or a previous owner). DATA is the
// only legal message on this network; anything else is a protocol error.
163 in_port(responseNetwork_in, ResponseMsg, responseToCache) {
164 if (responseNetwork_in.isReady()) {
165 peek(responseNetwork_in, ResponseMsg, block_on="Address") {
166 if (in_msg.Type == CoherenceResponseType:DATA) {
167 trigger(Event:Data, in_msg.Address);
168 }
169 else {
170 error("Unexpected message");
171 }
172 }
173 }
174 }
175
176 // Mandatory Queue
// Demand requests from the processor. If the target line is absent AND
// its set has no free frame, trigger Replacement on the victim chosen by
// cacheProbe (the demand message stays queued and is retried after the
// eviction completes); otherwise trigger the Load/Ifetch/Store event.
177 in_port(mandatoryQueue_in, CacheMsg, mandatoryQueue, desc="...") {
178 if (mandatoryQueue_in.isReady()) {
179 peek(mandatoryQueue_in, CacheMsg, block_on="LineAddress") {
180
181
182 if (cacheMemory.isTagPresent(in_msg.LineAddress) == false &&
183 cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
184 // make room for the block
185 trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress));
186 }
187 else {
188 trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
189 }
190 }
191 }
192 }
193
194 // ACTIONS
195
// Issue a GETX to the home directory. This protocol has no shared state,
// so GETX is the only demand request — even loads ask for exclusive.
196 action(a_issueRequest, "a", desc="Issue a request") {
197 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
198 out_msg.Address := address;
199 out_msg.Type := CoherenceRequestType:GETX;
200 out_msg.Requestor := machineID;
201 out_msg.Destination.add(map_Address_to_Directory(address));
202 out_msg.MessageSize := MessageSizeType:Control;
203 }
204 }
205
// Issue a PUTX writeback to the directory, carrying the cached data
// (must run before h_deallocateL1CacheBlock frees the frame).
206 action(b_issuePUT, "b", desc="Issue a PUT request") {
207 enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
208 out_msg.Address := address;
209 out_msg.Type := CoherenceRequestType:PUTX;
210 out_msg.Requestor := machineID;
211 out_msg.Destination.add(map_Address_to_Directory(address));
212 out_msg.DataBlk := getCacheEntry(address).DataBlk;
213 out_msg.MessageSize := MessageSizeType:Data;
214 }
215 }
216
217
// Service a forwarded GETX directly from the cache entry: send DATA
// straight to the requestor named in the forwarded message.
218 action(e_sendData, "e", desc="Send data from cache to requestor") {
219 peek(forwardRequestNetwork_in, RequestMsg) {
220 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
221 out_msg.Address := address;
222 out_msg.Type := CoherenceResponseType:DATA;
223 out_msg.Sender := machineID;
224 out_msg.Destination.add(in_msg.Requestor);
225 out_msg.DataBlk := getCacheEntry(address).DataBlk;
226 out_msg.MessageSize := MessageSizeType:Response_Data;
227 }
228 }
229 }
230
// Same as e_sendData, but sources the data from the TBE copy — used in
// MI/MII, after the cache frame has already been deallocated.
231 action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
232 peek(forwardRequestNetwork_in, RequestMsg) {
233 enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
234 out_msg.Address := address;
235 out_msg.Type := CoherenceResponseType:DATA;
236 out_msg.Sender := machineID;
237 out_msg.Destination.add(in_msg.Requestor);
238 out_msg.DataBlk := TBEs[address].DataBlk;
239 out_msg.MessageSize := MessageSizeType:Response_Data;
240 }
241 }
242 }
243
244
// Allocate a frame for the block (idempotent if already present).
245 action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
246 if (cacheMemory.isTagPresent(address) == false) {
247 cacheMemory.allocate(address, new Entry);
248 }
249 }
250
// Free the block's frame (idempotent if already absent).
251 action(h_deallocateL1CacheBlock, "h", desc="deallocate a cache block") {
252 if (cacheMemory.isTagPresent(address) == true) {
253 cacheMemory.deallocate(address);
254 }
255 }
256
257 action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
258 mandatoryQueue_in.dequeue();
259 }
260
// The network-queue pops also feed the message's queuing delay into the
// delay profiler (buckets 1 = response net, 2 = forward net).
261 action(n_popResponseQueue, "n", desc="Pop the response queue") {
262 profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
263 }
264
265 action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
266 profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
267 }
268
// Record a miss for the demand request at the head of the mandatory queue.
269 action(p_profileMiss, "p", desc="Profile cache miss") {
270 peek(mandatoryQueue_in, CacheMsg) {
271 cacheMemory.profileMiss(in_msg);
272 }
273 }
274
// Local load hit: return the cached data to the sequencer.
275 action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
276 DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
277 sequencer.readCallback(address,
278 GenericMachineType:L1Cache,
279 getCacheEntry(address).DataBlk);
280 }
281
// Load completed with externally supplied data: attribute the hit to the
// machine type of the responder (must run after u_writeDataToCache).
282 action(rx_load_hit, "rx", desc="External load completed.") {
283 peek(responseNetwork_in, ResponseMsg) {
284 DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
285 sequencer.readCallback(address,
286 getNondirectHitMachType(in_msg.Sender),
287 getCacheEntry(address).DataBlk);
288 }
289 }
290
// Local store hit: writeCallback lets the sequencer apply the store data.
291 action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
292 DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
293 sequencer.writeCallback(address,
294 GenericMachineType:L1Cache,
295 getCacheEntry(address).DataBlk);
296 }
297
// Store completed with externally supplied data (mirror of rx_load_hit).
298 action(sx_store_hit, "sx", desc="External store completed.") {
299 peek(responseNetwork_in, ResponseMsg) {
300 DPRINTF(RubySlicc,"%s\n", getCacheEntry(address).DataBlk);
301 sequencer.writeCallback(address,
302 getNondirectHitMachType(in_msg.Sender),
303 getCacheEntry(address).DataBlk);
304 }
305 }
306
// Install the arriving DATA payload into the (already allocated) entry.
307 action(u_writeDataToCache, "u", desc="Write data to the cache") {
308 peek(responseNetwork_in, ResponseMsg) {
309 getCacheEntry(address).DataBlk := in_msg.DataBlk;
310 }
311 }
312
313
314 action(v_allocateTBE, "v", desc="Allocate TBE") {
315 TBEs.allocate(address);
316 }
317
318
319 action(w_deallocateTBE, "w", desc="Deallocate TBE") {
320 TBEs.deallocate(address);
321 }
322
// Snapshot the data into the TBE so a forwarded GETX can be serviced
// after the cache frame is deallocated during writeback.
323 action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
324 TBEs[address].DataBlk := getCacheEntry(address).DataBlk;
325 }
326
// No-op: leaves the triggering message at the head of its queue so the
// event is retried later (used by the stall transitions below).
327 action(z_stall, "z", desc="stall") {
328 // do nothing
329 }
330
331 // TRANSITIONS

// --- Stalls: processor requests wait out any in-flight transaction, and
// transient request states cannot yet answer forwards or invalidates.
332
333 transition({IS, IM, MI, II}, {Load, Ifetch, Store, Replacement}) {
334 z_stall;
335 }
336
337 transition({IS, IM}, {Fwd_GETX, Inv}) {
338 z_stall;
339 }
340
// An Inv racing with our writeback carries no obligation — just drop it.
341 transition(MI, Inv) {
342 o_popForwardedRequestQueue;
343 }
344
// --- Hits in M.
345 transition(M, Store) {
346 s_store_hit;
347 m_popMandatoryQueue;
348 }
349
350 transition(M, {Load, Ifetch}) {
351 r_load_hit;
352 m_popMandatoryQueue;
353 }
354
355 transition(I, Inv) {
356 o_popForwardedRequestQueue;
357 }
358
// --- Misses: allocate TBE + frame, issue GETX, wait for data.
359 transition(I, Store, IM) {
360 v_allocateTBE;
361 i_allocateL1CacheBlock;
362 a_issueRequest;
363 p_profileMiss;
364 m_popMandatoryQueue;
365 }
366
367 transition(I, {Load, Ifetch}, IS) {
368 v_allocateTBE;
369 i_allocateL1CacheBlock;
370 a_issueRequest;
371 p_profileMiss;
372 m_popMandatoryQueue;
373 }
374
// Data arrival completes the miss; write data before the hit callback.
375 transition(IS, Data, M) {
376 u_writeDataToCache;
377 rx_load_hit;
378 w_deallocateTBE;
379 n_popResponseQueue;
380 }
381
382 transition(IM, Data, M) {
383 u_writeDataToCache;
384 sx_store_hit;
385 w_deallocateTBE;
386 n_popResponseQueue;
387 }
388
// --- Ownership handoff: supply data to the new owner, drop to I.
389 transition(M, Fwd_GETX, I) {
390 e_sendData;
391 o_popForwardedRequestQueue;
392 }
393
394 transition(I, Replacement) {
395 h_deallocateL1CacheBlock;
396 }
397
// --- Writeback. NOTE: the triggering message is deliberately NOT popped
// here; on Inv it stays at the queue head and is consumed by the
// (MI, Inv) transition above, and on Replacement the stalled demand
// request remains on the mandatory queue to be retried.
398 transition(M, {Replacement,Inv}, MI) {
399 v_allocateTBE;
400 b_issuePUT;
401 x_copyDataFromCacheToTBE;
402 h_deallocateL1CacheBlock;
403 }
404
405 transition(MI, Writeback_Ack, I) {
406 w_deallocateTBE;
407 o_popForwardedRequestQueue;
408 }
409
// A forwarded GETX beat our PUTX to the directory: serve the new owner
// from the TBE and wait in II for the directory's nack of our PUT.
410 transition(MI, Fwd_GETX, II) {
411 ee_sendDataFromTBE;
412 o_popForwardedRequestQueue;
413 }
414
// PUTX was nacked: keep the TBE data; a Fwd_GETX will follow.
415 transition(MI, Writeback_Nack, MII) {
416 o_popForwardedRequestQueue;
417 }
418
419 transition(MII, Fwd_GETX, I) {
420 ee_sendDataFromTBE;
421 w_deallocateTBE;
422 o_popForwardedRequestQueue;
423 }
424
425 transition(II, Writeback_Nack, I) {
426 w_deallocateTBE;
427 o_popForwardedRequestQueue;
428 }
429 }
430