ruby: slicc: change the way configurable members are specified
[gem5.git] src/mem/protocol/MI_example-cache.sm
/*
 * Copyright (c) 2009-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2010-2012 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L1Cache, "MI Example L1 Cache")
    : Sequencer * sequencer;
      CacheMemory * cacheMemory;
      Cycles cache_response_latency := 12;
      Cycles issue_latency := 2;
      bool send_evictions;
{
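  // The declarations above are SLICC configurable members: each becomes a
  // parameter of the generated L1Cache_Controller SimObject, so values such
  // as issue_latency can be overridden from the Python configuration
  // scripts; ":=" supplies the default value.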

  // NETWORK BUFFERS
  MessageBuffer requestFromCache, network="To", virtual_network="2", ordered="true", vnet_type="request";
  MessageBuffer responseFromCache, network="To", virtual_network="4", ordered="true", vnet_type="response";

  MessageBuffer forwardToCache, network="From", virtual_network="3", ordered="true", vnet_type="forward";
  MessageBuffer responseToCache, network="From", virtual_network="4", ordered="true", vnet_type="response";
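
  // "To" buffers send into the interconnect; "From" buffers receive from it.
  // Requests, forwards, and responses travel on separate virtual networks
  // (2, 3, and 4 here) so one message class cannot block another, which
  // helps avoid protocol deadlock.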

  // STATES
  state_declaration(State, desc="Cache states") {
    I,   AccessPermission:Invalid,    desc="Not Present/Invalid";
    II,  AccessPermission:Busy,       desc="Not Present/Invalid, issued PUT";
    M,   AccessPermission:Read_Write, desc="Modified";
    MI,  AccessPermission:Busy,       desc="Modified, issued PUT";
    MII, AccessPermission:Busy,       desc="Modified, issued PUTX, received nack";

    IS,  AccessPermission:Busy,       desc="Issued request for LOAD/IFETCH";
    IM,  AccessPermission:Busy,       desc="Issued request for STORE/ATOMIC";
  }
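
  // The AccessPermission attached to each state tells Ruby how a line in
  // that state may be accessed: Read_Write lines can service loads and
  // stores locally, Invalid lines cannot, and Busy marks in-flight
  // transient states.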

  // EVENTS
  enumeration(Event, desc="Cache events") {
    // From processor
    Load,           desc="Load request from processor";
    Ifetch,         desc="Ifetch request from processor";
    Store,          desc="Store request from processor";

    Data,           desc="Data from network";
    Fwd_GETX,       desc="Forward from network";

    Inv,            desc="Invalidate request from dir";

    Replacement,    desc="Replace a block";
    Writeback_Ack,  desc="Ack from the directory for a writeback";
    Writeback_Nack, desc="Nack from the directory for a writeback";
  }

  // STRUCTURE DEFINITIONS

  MessageBuffer mandatoryQueue, ordered="false";

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState,  desc="cache state";
    bool Dirty,        desc="Is the data dirty (different than memory)?";
    DataBlock DataBlk, desc="Data in the block";
  }

  // TBE fields
  structure(TBE, desc="...") {
    State TBEState,    desc="Transient state";
    DataBlock DataBlk, desc="data for the block, required for concurrent writebacks";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }
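
  // external="yes": TBETable is implemented in C++ rather than generated
  // from SLICC. It tracks outstanding transactions, one TBE per in-flight
  // address, playing the role of an MSHR file.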

  // STRUCTURES
  TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

  // PROTOTYPES
  void set_cache_entry(AbstractCacheEntry a);
  void unset_cache_entry();
  void set_tbe(TBE b);
  void unset_tbe();
  void profileMsgDelay(int virtualNetworkType, Cycles b);

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    return static_cast(Entry, "pointer", cacheMemory.lookup(address));
  }

  // FUNCTIONS
  Event mandatory_request_type_to_event(RubyRequestType type) {
    if (type == RubyRequestType:LD) {
      return Event:Load;
    } else if (type == RubyRequestType:IFETCH) {
      return Event:Ifetch;
    } else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
      return Event:Store;
    } else {
      error("Invalid RubyRequestType");
    }
  }

  State getState(TBE tbe, Entry cache_entry, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (is_valid(cache_entry)) {
      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return L1Cache_State_to_permission(tbe.TBEState);
    }

    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L1Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L1Cache_State_to_permission(state));
    }
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    }

    return getCacheEntry(addr).DataBlk;
  }

  // NETWORK PORTS

  out_port(requestNetwork_out, RequestMsg, requestFromCache);
  out_port(responseNetwork_out, ResponseMsg, responseFromCache);

  in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
    if (forwardRequestNetwork_in.isReady()) {
      peek(forwardRequestNetwork_in, RequestMsg, block_on="Addr") {
        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Fwd_GETX, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
          trigger(Event:Writeback_Ack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
          trigger(Event:Writeback_Nack, in_msg.Addr, cache_entry, tbe);
        } else if (in_msg.Type == CoherenceRequestType:INV) {
          trigger(Event:Inv, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToCache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg, block_on="Addr") {
        Entry cache_entry := getCacheEntry(in_msg.Addr);
        TBE tbe := TBEs[in_msg.Addr];

        if (in_msg.Type == CoherenceResponseType:DATA) {
          trigger(Event:Data, in_msg.Addr, cache_entry, tbe);
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  // Mandatory Queue
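  // CPU requests arrive here from the Sequencer. If the request misses and
  // the target set is full, a Replacement is triggered on the victim chosen
  // by cacheProbe(); the original request is not popped, so it retries once
  // the victim has been evicted.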
  in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
    if (mandatoryQueue_in.isReady()) {
      peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
        Entry cache_entry := getCacheEntry(in_msg.LineAddress);
        if (is_invalid(cache_entry) &&
            cacheMemory.cacheAvail(in_msg.LineAddress) == false) {
          // make room for the block
          trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
                  getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
                  TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
        } else {
          trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
                  cache_entry, TBEs[in_msg.LineAddress]);
        }
      }
    }
  }

  // ACTIONS

  action(a_issueRequest, "a", desc="Issue a request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:GETX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.MessageSize := MessageSizeType:Control;
    }
  }
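
  // Note that a_issueRequest sends GETX even for loads: this protocol has
  // no Shared state, so every miss requests exclusive ownership.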

  action(b_issuePUT, "b", desc="Issue a PUT request") {
    enqueue(requestNetwork_out, RequestMsg, issue_latency) {
      assert(is_valid(cache_entry));
      out_msg.Addr := address;
      out_msg.Type := CoherenceRequestType:PUTX;
      out_msg.Requestor := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.MessageSize := MessageSizeType:Data;
    }
  }

  action(e_sendData, "e", desc="Send data from cache to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(cache_entry));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
    peek(forwardRequestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, cache_response_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := tbe.DataBlk;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }
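
  // ee_sendDataFromTBE handles a Fwd_GETX that races with an in-flight
  // writeback: the cache block has already been deallocated, but the TBE
  // still holds a copy of the data.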

  action(i_allocateL1CacheBlock, "i", desc="Allocate a cache block") {
    if (is_invalid(cache_entry)) {
      set_cache_entry(cacheMemory.allocate(address, new Entry));
    }
  }

  action(h_deallocateL1CacheBlock, "h", desc="Deallocate a cache block") {
    if (is_valid(cache_entry)) {
      cacheMemory.deallocate(address);
      unset_cache_entry();
    }
  }

  action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
    mandatoryQueue_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop the response queue") {
    profileMsgDelay(1, responseNetwork_in.dequeue());
  }

  action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
    profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
  }

  action(p_profileMiss, "pi", desc="Profile cache miss") {
    ++cacheMemory.demand_misses;
  }

  action(p_profileHit, "ph", desc="Profile cache hit") {
    ++cacheMemory.demand_hits;
  }

  action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.readCallback(address, cache_entry.DataBlk, false);
  }

  action(rx_load_hit, "rx", desc="External load completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.readCallback(address, cache_entry.DataBlk, true,
                             machineIDToMachineType(in_msg.Sender));
    }
  }

  action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
    assert(is_valid(cache_entry));
    DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
    sequencer.writeCallback(address, cache_entry.DataBlk, false);
  }

  action(sx_store_hit, "sx", desc="External store completed.") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
      sequencer.writeCallback(address, cache_entry.DataBlk, true,
                              machineIDToMachineType(in_msg.Sender));
    }
  }

  action(u_writeDataToCache, "u", desc="Write data to the cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
    }
  }

  action(forward_eviction_to_cpu, "\cc", desc="Send eviction information to the processor") {
    if (send_evictions) {
      DPRINTF(RubySlicc, "Sending invalidation for %s to the CPU\n", address);
      sequencer.evictionCallback(address);
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    TBEs.allocate(address);
    set_tbe(TBEs[address]);
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
  }

  action(z_stall, "z", desc="Stall") {
    // do nothing
  }
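
  // z_stall performs no dequeue, so the triggering message stays at the head
  // of its queue and the event re-fires until the line leaves its transient
  // state.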

  // TRANSITIONS
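  // Each transition names the current state(s), the triggering event(s),
  // and optionally the next state; the listed actions execute in order,
  // atomically with the state change.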

  transition({IS, IM, MI, II, MII}, {Load, Ifetch, Store, Replacement}) {
    z_stall;
  }

  transition({IS, IM}, {Fwd_GETX, Inv}) {
    z_stall;
  }

  transition(MI, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(M, Store) {
    s_store_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(M, {Load, Ifetch}) {
    r_load_hit;
    p_profileHit;
    m_popMandatoryQueue;
  }

  transition(I, Inv) {
    o_popForwardedRequestQueue;
  }

  transition(I, Store, IM) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(I, {Load, Ifetch}, IS) {
    v_allocateTBE;
    i_allocateL1CacheBlock;
    a_issueRequest;
    p_profileMiss;
    m_popMandatoryQueue;
  }

  transition(IS, Data, M) {
    u_writeDataToCache;
    rx_load_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(IM, Data, M) {
    u_writeDataToCache;
    sx_store_hit;
    w_deallocateTBE;
    n_popResponseQueue;
  }

  transition(M, Fwd_GETX, I) {
    e_sendData;
    forward_eviction_to_cpu;
    o_popForwardedRequestQueue;
  }

  transition(I, Replacement) {
    h_deallocateL1CacheBlock;
  }

  transition(M, {Replacement, Inv}, MI) {
    v_allocateTBE;
    b_issuePUT;
    x_copyDataFromCacheToTBE;
    forward_eviction_to_cpu;
    h_deallocateL1CacheBlock;
  }
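
  // In MI the block itself is gone and the data lives only in the TBE
  // (x_copyDataFromCacheToTBE above), so a forward that races with the
  // writeback is answered from the TBE below.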

  transition(MI, Writeback_Ack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Fwd_GETX, II) {
    ee_sendDataFromTBE;
    o_popForwardedRequestQueue;
  }

  transition(MI, Writeback_Nack, MII) {
    o_popForwardedRequestQueue;
  }
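
  // A nacked writeback means the directory has already promised the line to
  // another requester; the cache waits in MII for that requester's Fwd_GETX
  // and supplies the data from the TBE.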

  transition(MII, Fwd_GETX, I) {
    ee_sendDataFromTBE;
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }

  transition(II, Writeback_Nack, I) {
    w_deallocateTBE;
    o_popForwardedRequestQueue;
  }
}